From 5a4f768d7472dd1524f7c80b8654dbba542996bb Mon Sep 17 00:00:00 2001 From: Rich Parker Date: Tue, 19 Mar 2024 12:43:00 -0400 Subject: [PATCH 01/28] un-commented the blind catalog action to test if other fixes corrected the catalog issue (#1303) --- .../modules/test_zos_data_set_func.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/functional/modules/test_zos_data_set_func.py b/tests/functional/modules/test_zos_data_set_func.py index 0167c1b83..7ab4685c0 100644 --- a/tests/functional/modules/test_zos_data_set_func.py +++ b/tests/functional/modules/test_zos_data_set_func.py @@ -153,9 +153,9 @@ def test_data_set_catalog_and_uncatalog(ansible_zos_module, jcl, volumes_on_syst volume_1 = volumes.get_available_vol() dataset = get_tmp_ds_name(2, 2) try: - # hosts.all.zos_data_set( - # name=dataset, state="cataloged", volumes=volume_1 - # ) + hosts.all.zos_data_set( + name=dataset, state="cataloged", volumes=volume_1 + ) hosts.all.zos_data_set(name=dataset, state="absent") hosts.all.file(path=TEMP_PATH, state="directory") @@ -212,9 +212,9 @@ def test_data_set_present_when_uncataloged(ansible_zos_module, jcl, volumes_on_s volume_1 = volumes.get_available_vol() dataset = get_tmp_ds_name(2, 2) try: - # hosts.all.zos_data_set( - # name=dataset, state="cataloged", volumes=volume_1 - # ) + hosts.all.zos_data_set( + name=dataset, state="cataloged", volumes=volume_1 + ) hosts.all.zos_data_set(name=dataset, state="absent") hosts.all.file(path=TEMP_PATH, state="directory") @@ -258,9 +258,9 @@ def test_data_set_replacement_when_uncataloged(ansible_zos_module, jcl, volumes_ volume = volumes.get_available_vol() dataset = get_tmp_ds_name(2, 2) try: - # hosts.all.zos_data_set( - # name=dataset, state="cataloged", volumes=volume - # ) + hosts.all.zos_data_set( + name=dataset, state="cataloged", volumes=volume + ) hosts.all.zos_data_set(name=dataset, state="absent") hosts.all.file(path=TEMP_PATH, state="directory") @@ -306,9 +306,9 @@ def 
test_data_set_absent_when_uncataloged(ansible_zos_module, jcl, volumes_on_sy volume_1 = volumes.get_available_vol() hosts = ansible_zos_module dataset = get_tmp_ds_name(2, 2) - # hosts.all.zos_data_set( - # name=dataset, state="cataloged", volumes=volume_1 - # ) + hosts.all.zos_data_set( + name=dataset, state="cataloged", volumes=volume_1 + ) hosts.all.zos_data_set(name=dataset, state="absent") hosts.all.file(path=TEMP_PATH, state="directory") @@ -345,7 +345,7 @@ def test_data_set_absent_when_uncataloged_and_same_name_cataloged_is_present(ans volume_2 = volumes.get_available_vol() hosts = ansible_zos_module dataset = get_tmp_ds_name(2, 2) - # hosts.all.zos_data_set(name=dataset, state="cataloged", volumes=volume_1) + hosts.all.zos_data_set(name=dataset, state="cataloged", volumes=volume_1) hosts.all.zos_data_set(name=dataset, state="absent") From 4c2be29b07d59a7739fa6449fa7fdcaf4c682e33 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 19 Mar 2024 11:24:35 -0600 Subject: [PATCH 02/28] Enabler/692/add changelog lint (#1304) * Create bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Added changelog action * Update changelog.yml * Create close-stale-issues * Update close-stale-issues Quite el workflow dispatch * Create bandit2.yml * Update bandit2.yml * Update zos_copy.py * Update zos_copy.py Me equivoque * Create ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Added ac changelog * added lint as an option * Added documentation to ac_changelog * Changed 'lint' to 'command' on ac_changelog * Create * Create first version of the changelog action * Update changelog.yml * Fix changelog.yml * Change name of action Antsibull 'Changelog lint' to AC Changelog lint * Rename 'changelog.yml' to 'ac_changelog.yml * Create 
ac_changelog.yml * Update ac_changelog.yml * Update ac_changelog.yml * Update ac_changelog.yml * Change path in 'venv setup' on ac * Change ac_changelog.yml * Change ac_changelog.yml * Change ac_changelog.yml * Change ac_changelog.yml * Removed not required github actions * Update zos_copy.py * Update ac_changelog.yml * Update ac_changelog.yml * Indented steps section * Modified changed line * Added changelog --------- Co-authored-by: Fernando Flores --- .github/workflows/ac_changelog.yml | 39 +++++++++++++++++++ ac | 37 ++++++++++++++++++ .../fragments/692-changelog-lint-ac-tool.yml | 8 ++++ 3 files changed, 84 insertions(+) create mode 100644 .github/workflows/ac_changelog.yml create mode 100644 changelogs/fragments/692-changelog-lint-ac-tool.yml diff --git a/.github/workflows/ac_changelog.yml b/.github/workflows/ac_changelog.yml new file mode 100644 index 000000000..523e207b9 --- /dev/null +++ b/.github/workflows/ac_changelog.yml @@ -0,0 +1,39 @@ +name: AC Changelog Lint + +on: + pull_request: + paths: + - 'changelogs/fragments/*' + branches: + - dev + - staging* + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up python + uses: actions/setup-python@v5 + with: + python-version: 3.11 + + - name: Set up venv + run: | + python -m pip install --upgrade pip + pip install virtualenv + mkdir venv + virtualenv venv/venv-2.16 + + - name: Install dependencies + run: | + source venv/venv-2.16/bin/activate + pip install antsibull-changelog + + - name: Run ac-changelog + run: | + source venv/venv-2.16/bin/activate + ./ac --ac-changelog --command lint diff --git a/ac b/ac index dad00194c..bb307f4a6 100755 --- a/ac +++ b/ac @@ -241,6 +241,32 @@ ac_build(){ $VENV_BIN/ansible-galaxy collection install -f ibm-ibm_zos_core-* } +# ------------------------------------------------------------------------------ +# Run a changelog lint locally +# 
------------------------------------------------------------------------------ +#->ac-changelog: +## Runs antsibull-changelog to generate the release changelog or perform a lint +## on changelog fragments or release notes. +## Usage: ac [--ac-changelog ] +## - choose from 'init', 'lint', 'lint-changelog-yaml', 'release', 'generate' +## - generate generate the changelog +## - init set up changelog infrastructure for collection, or an other project +## - lint check changelog fragments for syntax errors +## - lint-changelog-yaml check syntax of changelogs/changelog.yaml file +## - release add a new release to the change metadata +## Example: +## $ ac --ac-changelog --command lint +## $ ac --ac-changelog --command release +## $ ac --ac-changelog +ac_changelog(){ + option_command=$1 + if [ ! "$option_command" ]; then + option_command="lint" + fi + message "Running Changelog '$option_command'" + . $VENV_BIN/activate && antsibull-changelog "${option_command}" +} + # ------------------------------------------------------------------------------ # Install an ibm_zos_core collection from galaxy (or how you have ansible.cfg configured) # ------------------------------------------------------------------------------ @@ -653,6 +679,10 @@ while true; do ensure_managed_venv_exists $1 option_submitted="--ac-build" ;; + --ac-changelog) # Command + ensure_managed_venv_exists $1 + option_submitted="--ac-changelog" + ;; --ac-install) ensure_managed_venv_exists $1 # Command option_submitted="--ac-install" @@ -716,6 +746,11 @@ while true; do ensure_managed_venv_exists $1 option_submitted="--venv-stop" ;; + --command|--command=?*) # option + command=`option_processor $1 $2` + option_sanitize $command + shift + ;; --debug|--debug=?*) # option debug=`option_processor $1 $2` option_sanitize $debug @@ -800,6 +835,8 @@ if [ "$option_submitted" ] && [ "$option_submitted" = "--ac-bandit" ] ; then ac_bandit $level elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-build" ] ; then 
ac_build +elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-changelog" ] ; then + ac_changelog $command elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-install" ] ; then ac_install $version elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-lint" ] ; then diff --git a/changelogs/fragments/692-changelog-lint-ac-tool.yml b/changelogs/fragments/692-changelog-lint-ac-tool.yml new file mode 100644 index 000000000..cbf6bab7d --- /dev/null +++ b/changelogs/fragments/692-changelog-lint-ac-tool.yml @@ -0,0 +1,8 @@ +trivial: + - ac - Added new command ac-changelog into ac tool to run changelog + fragments lint and changelog release generation. + (https://github.com/ansible-collections/ibm_zos_core/pull/1304). + + - workflows/ac_changelog - Added new github action that will lint + changelog fragments upon a new pull request. + (https://github.com/ansible-collections/ibm_zos_core/pull/1304). \ No newline at end of file From 9c5bab3c39214ff2e4c0ab07f28a1624a0d336ae Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Wed, 20 Mar 2024 13:37:33 -0600 Subject: [PATCH 03/28] Changed case sensitive options --- .../functional/modules/test_zos_copy_func.py | 194 +++++++++--------- 1 file changed, 97 insertions(+), 97 deletions(-) diff --git a/tests/functional/modules/test_zos_copy_func.py b/tests/functional/modules/test_zos_copy_func.py index cf7f1494b..bbd598f1c 100644 --- a/tests/functional/modules/test_zos_copy_func.py +++ b/tests/functional/modules/test_zos_copy_func.py @@ -1684,7 +1684,7 @@ def test_copy_seq_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="seq", + type="PDSE", replace=True ) @@ -1733,7 +1733,7 @@ def test_copy_seq_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="seq", + type="PDSE", replace=True ) @@ -1784,7 +1784,7 @@ def test_copy_partitioned_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, 
state="present", - type="pdse", + type="PDSE", replace=True ) @@ -1834,7 +1834,7 @@ def test_copy_partitioned_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="pdse", + type="PDSE", replace=True ) @@ -1884,7 +1884,7 @@ def test_copy_asa_data_set_to_text_file(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="seq", + type="PDSE", record_format="FBA", record_length=80, block_size=27920, @@ -1977,8 +1977,8 @@ def test_copy_dest_lock(ansible_zos_module, ds_type): hosts.all.zos_data_set(name=data_set_1, state="present", type=ds_type, replace=True) hosts.all.zos_data_set(name=data_set_2, state="present", type=ds_type, replace=True) if ds_type == "PDS" or ds_type == "PDSE": - hosts.all.zos_data_set(name=src_data_set, state="present", type="member", replace=True) - hosts.all.zos_data_set(name=dest_data_set, state="present", type="member", replace=True) + hosts.all.zos_data_set(name=src_data_set, state="present", type="MEMBER", replace=True) + hosts.all.zos_data_set(name=dest_data_set, state="present", type="MEMBER", replace=True) # copy text_in source hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(DUMMY_DATA, src_data_set)) # copy/compile c program and copy jcl to hold data set lock for n seconds in background(&) @@ -2266,7 +2266,7 @@ def test_copy_file_to_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="present") + hosts.all.zos_data_set(name=dest, type="PDSE", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2294,7 +2294,7 @@ def test_copy_file_to_non_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="absent") + hosts.all.zos_data_set(name=dest, type="PDSE", state="absent") hosts.all.zos_copy(content="Inline 
content", dest=dest) copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2432,7 +2432,7 @@ def test_copy_ps_to_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="present") + hosts.all.zos_data_set(name=dest, type="PDSE", state="present") copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) verify_copy = hosts.all.shell( @@ -2458,7 +2458,7 @@ def test_copy_ps_to_non_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="absent") + hosts.all.zos_data_set(name=dest, type="PDSE", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2489,7 +2489,7 @@ def test_copy_ps_to_non_empty_ps_with_special_chars(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="absent") + hosts.all.zos_data_set(name=dest, type="PDSE", state="absent") hosts.all.zos_copy(content=DUMMY_DATA_SPECIAL_CHARS, dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2520,7 +2520,7 @@ def test_backup_sequential_data_set(ansible_zos_module, backup): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="present") + hosts.all.zos_data_set(name=dest, type="PDSE", state="present") if backup: copy_res = hosts.all.zos_copy(src=src, dest=dest, force=True, backup=True, backup_name=backup) @@ -2565,10 +2565,10 @@ def test_copy_file_to_non_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="pdse", + type="PDSE", space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=80, replace=True ) @@ -2611,14 +2611,14 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): try: 
hosts.all.zos_data_set( name=data_set, - type="pdse", + type="PDSE", space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=80, replace=True ) - hosts.all.zos_data_set(name=dest, type="member", state="present") + hosts.all.zos_data_set(name=dest, type="MEMBER", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, force=src["force"], remote_src=src["is_remote"]) @@ -2647,31 +2647,31 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="seq", is_binary=False), - dict(type="seq", is_binary=True), - dict(type="pds", is_binary=False), - dict(type="pds", is_binary=True), - dict(type="pdse", is_binary=False), - dict(type="pdse", is_binary=True) + dict(type="PDSE", is_binary=False), + dict(type="PDSE", is_binary=True), + dict(type="PDSE", is_binary=False), + dict(type="PDSE", is_binary=True), + dict(type="PDSE", is_binary=False), + dict(type="PDSE", is_binary=True) ]) def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "PDSE" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "seq": - hosts.all.zos_data_set(name=src, type="member") + if args["type"] != "PDSE": + hosts.all.zos_data_set(name=src, type="MEMBER") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) + hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) copy_result = hosts.all.zos_copy(src=src, dest=dest, is_binary=args["is_binary"], remote_src=True) verify_copy 
= hosts.all.shell( @@ -2694,32 +2694,32 @@ def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="seq", force=False), - dict(type="seq", force=True), - dict(type="pds", force=False), - dict(type="pds", force=True), - dict(type="pdse", force=False), - dict(type="pdse", force=True) + dict(type="PDSE", force=False), + dict(type="PDSE", force=True), + dict(type="PDSE", force=False), + dict(type="PDSE", force=True), + dict(type="PDSE", force=False), + dict(type="PDSE", force=True) ]) def test_copy_data_set_to_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "PDSE" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "seq": - hosts.all.zos_data_set(name=src, type="member") + if args["type"] != "PDSE": + hosts.all.zos_data_set(name=src, type="MEMBER") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) - hosts.all.zos_data_set(name=dest, type="member") + hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) + hosts.all.zos_data_set(name=dest, type="MEMBER") copy_result = hosts.all.zos_copy(src=src, dest=dest, force=args["force"], remote_src=True) verify_copy = hosts.all.shell( @@ -2838,7 +2838,7 @@ def test_copy_dir_crlf_endings_to_non_existing_pdse(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_dir = 
"/tmp/testdir" @@ -2854,7 +2854,7 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): type=src_type, space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=80, ) @@ -2877,18 +2877,18 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDSE", "PDSE", "PDSE"]) def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if src_type == "seq" else "{0}(TEST)".format(src_data_set) + src = src_data_set if src_type == "PDSE" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=src_type) - if src_type != "seq": - hosts.all.zos_data_set(name=src, type="member") + if src_type != "PDSE": + hosts.all.zos_data_set(name=src, type="MEMBER") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), @@ -2918,10 +2918,10 @@ def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(src_type="pds", dest_type="pds"), - dict(src_type="pds", dest_type="pdse"), - dict(src_type="pdse", dest_type="pds"), - dict(src_type="pdse", dest_type="pdse"), + dict(src_type="PDSE", dest_type="PDSE"), + dict(src_type="PDSE", dest_type="PDSE"), + dict(src_type="PDSE", dest_type="PDSE"), + dict(src_type="PDSE", dest_type="PDSE"), ]) def test_copy_pds_to_existing_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -2973,7 +2973,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDSE", space_primary=2, record_format="FB", record_length=80, @@ -2984,7 +2984,7 @@ def 
test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3006,7 +3006,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3018,7 +3018,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3111,7 +3111,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3122,7 +3122,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDSE", space_primary=2, record_format="FB", record_length=80, @@ -3132,7 +3132,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3143,7 +3143,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3261,7 +3261,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDSE", space_primary=2, record_format="FB", record_length=80, @@ -3272,7 +3272,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): 
hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3300,7 +3300,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3312,7 +3312,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3453,7 +3453,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDSE", space_primary=2, record_format="FB", record_length=80, @@ -3464,7 +3464,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3524,7 +3524,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3615,7 +3615,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDSE", space_primary=2, record_format="FB", record_length=80, @@ -3626,7 +3626,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3651,7 +3651,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", 
record_format="U", record_length=0, block_size=32760, @@ -3663,7 +3663,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3827,7 +3827,7 @@ def test_copy_executables_uss_to_member(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3878,7 +3878,7 @@ def test_copy_pds_member_with_system_symbol(ansible_zos_module): hosts.all.zos_data_set( name=dest, state="present", - type="pdse", + type="PDSE", replace=True ) @@ -3914,8 +3914,8 @@ def test_copy_multiple_data_set_members(ansible_zos_module): ds_list = ["{0}({1})".format(src, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="pds") - hosts.all.zos_data_set(name=dest, type="pds") + hosts.all.zos_data_set(name=src, type="PDSE") + hosts.all.zos_data_set(name=dest, type="PDSE") for member in ds_list: hosts.all.shell( @@ -3960,8 +3960,8 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): dest_ds_list = ["{0}({1})".format(dest, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="pds") - hosts.all.zos_data_set(name=dest, type="pds") + hosts.all.zos_data_set(name=src, type="PDSE") + hosts.all.zos_data_set(name=dest, type="PDSE") for src_member in src_ds_list: hosts.all.shell( @@ -3994,7 +3994,7 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("ds_type", ["pds", "pdse"]) +@pytest.mark.parametrize("ds_type", ["PDSE", "PDSE"]) def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): hosts = ansible_zos_module data_set = get_tmp_ds_name() @@ -4032,10 +4032,10 @@ def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): @pytest.mark.uss 
@pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(ds_type="pds", force=False), - dict(ds_type="pds", force=True), - dict(ds_type="pdse", force=False), - dict(ds_type="pdse", force=True) + dict(ds_type="PDSE", force=False), + dict(ds_type="PDSE", force=True), + dict(ds_type="PDSE", force=False), + dict(ds_type="PDSE", force=True) ]) def test_copy_member_to_existing_uss_file(ansible_zos_module, args): hosts = ansible_zos_module @@ -4079,7 +4079,7 @@ def test_copy_member_to_existing_uss_file(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4124,7 +4124,7 @@ def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) def test_copy_member_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4170,7 +4170,7 @@ def test_copy_member_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4206,10 +4206,10 @@ def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="pds", force=False), - dict(type="pds", force=True), - dict(type="pdse", force=False), - dict(type="pdse", force=True), + dict(type="PDSE", force=False), + dict(type="PDSE", force=True), + dict(type="PDSE", force=False), + dict(type="PDSE", force=True), ]) def 
test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): hosts = ansible_zos_module @@ -4218,7 +4218,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="present", replace=True) + hosts.all.zos_data_set(name=dest, type="PDSE", state="present", replace=True) hosts.all.zos_data_set(name=src_ds, type=args["type"], state="present") for data_set in [src, dest]: @@ -4251,7 +4251,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("dest_type", ["pds", "pdse"]) +@pytest.mark.parametrize("dest_type", ["PDSE", "PDSE"]) def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): hosts = ansible_zos_module src = "/etc/profile" @@ -4262,7 +4262,7 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): type=dest_type, space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=25, ) @@ -4294,10 +4294,10 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="pds", backup=None), - dict(type="pds", backup="USER.TEST.PDS.BACKUP"), - dict(type="pdse", backup=None), - dict(type="pdse", backup="USER.TEST.PDSE.BACKUP"), + dict(type="PDSE", backup=None), + dict(type="PDSE", backup="USER.TEST.PDS.BACKUP"), + dict(type="PDSE", backup=None), + dict(type="PDSE", backup="USER.TEST.PDSE.BACKUP"), ]) def test_backup_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -4343,7 +4343,7 @@ def test_backup_pds(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDSE", "PDSE", "PDSE"]) def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_type): hosts = ansible_zos_module source = 
get_tmp_ds_name() @@ -4359,8 +4359,8 @@ def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_typ try: hosts.all.zos_data_set(name=source, type=src_type, state='present') - if src_type != "seq": - hosts.all.zos_data_set(name=source_member, type="member", state='present') + if src_type != "PDSE": + hosts.all.zos_data_set(name=source_member, type="MEMBER", state='present') copy_res = hosts.all.zos_copy( src=source, @@ -4631,7 +4631,7 @@ def test_copy_uss_file_to_existing_sequential_data_set_twice_with_tmphlq_option( src_file = "/etc/profile" tmphlq = "TMPHLQ" try: - hosts.all.zos_data_set(name=dest, type="seq", state="present") + hosts.all.zos_data_set(name=dest, type="PDSE", state="present") copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, force=force) copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, backup=True, tmp_hlq=tmphlq, force=force) From 73eae1c3cd1786b242b359d5cf58d84d5b5eaf96 Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Wed, 20 Mar 2024 19:57:12 -0600 Subject: [PATCH 04/28] Revert "Changed case sensitive options" This reverts commit 9c5bab3c39214ff2e4c0ab07f28a1624a0d336ae. 
--- .../functional/modules/test_zos_copy_func.py | 194 +++++++++--------- 1 file changed, 97 insertions(+), 97 deletions(-) diff --git a/tests/functional/modules/test_zos_copy_func.py b/tests/functional/modules/test_zos_copy_func.py index bbd598f1c..cf7f1494b 100644 --- a/tests/functional/modules/test_zos_copy_func.py +++ b/tests/functional/modules/test_zos_copy_func.py @@ -1684,7 +1684,7 @@ def test_copy_seq_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="seq", replace=True ) @@ -1733,7 +1733,7 @@ def test_copy_seq_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="seq", replace=True ) @@ -1784,7 +1784,7 @@ def test_copy_partitioned_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -1834,7 +1834,7 @@ def test_copy_partitioned_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -1884,7 +1884,7 @@ def test_copy_asa_data_set_to_text_file(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="seq", record_format="FBA", record_length=80, block_size=27920, @@ -1977,8 +1977,8 @@ def test_copy_dest_lock(ansible_zos_module, ds_type): hosts.all.zos_data_set(name=data_set_1, state="present", type=ds_type, replace=True) hosts.all.zos_data_set(name=data_set_2, state="present", type=ds_type, replace=True) if ds_type == "PDS" or ds_type == "PDSE": - hosts.all.zos_data_set(name=src_data_set, state="present", type="MEMBER", replace=True) - hosts.all.zos_data_set(name=dest_data_set, state="present", type="MEMBER", replace=True) + hosts.all.zos_data_set(name=src_data_set, state="present", type="member", replace=True) + hosts.all.zos_data_set(name=dest_data_set, state="present", type="member", replace=True) # copy text_in 
source hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(DUMMY_DATA, src_data_set)) # copy/compile c program and copy jcl to hold data set lock for n seconds in background(&) @@ -2266,7 +2266,7 @@ def test_copy_file_to_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2294,7 +2294,7 @@ def test_copy_file_to_non_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2432,7 +2432,7 @@ def test_copy_ps_to_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) verify_copy = hosts.all.shell( @@ -2458,7 +2458,7 @@ def test_copy_ps_to_non_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2489,7 +2489,7 @@ def test_copy_ps_to_non_empty_ps_with_special_chars(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") 
hosts.all.zos_copy(content=DUMMY_DATA_SPECIAL_CHARS, dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2520,7 +2520,7 @@ def test_backup_sequential_data_set(ansible_zos_module, backup): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") if backup: copy_res = hosts.all.zos_copy(src=src, dest=dest, force=True, backup=True, backup_name=backup) @@ -2565,10 +2565,10 @@ def test_copy_file_to_non_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="PDSE", + type="pdse", space_primary=5, space_type="M", - record_format="FBA", + record_format="fba", record_length=80, replace=True ) @@ -2611,14 +2611,14 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="PDSE", + type="pdse", space_primary=5, space_type="M", - record_format="FBA", + record_format="fba", record_length=80, replace=True ) - hosts.all.zos_data_set(name=dest, type="MEMBER", state="present") + hosts.all.zos_data_set(name=dest, type="member", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, force=src["force"], remote_src=src["is_remote"]) @@ -2647,31 +2647,31 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDSE", is_binary=False), - dict(type="PDSE", is_binary=True), - dict(type="PDSE", is_binary=False), - dict(type="PDSE", is_binary=True), - dict(type="PDSE", is_binary=False), - dict(type="PDSE", is_binary=True) + dict(type="seq", is_binary=False), + dict(type="seq", is_binary=True), + dict(type="pds", is_binary=False), + dict(type="pds", is_binary=True), + dict(type="pdse", is_binary=False), + dict(type="pdse", is_binary=True) ]) def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): 
hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "PDSE" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "PDSE": - hosts.all.zos_data_set(name=src, type="MEMBER") + if args["type"] != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) + hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) copy_result = hosts.all.zos_copy(src=src, dest=dest, is_binary=args["is_binary"], remote_src=True) verify_copy = hosts.all.shell( @@ -2694,32 +2694,32 @@ def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDSE", force=False), - dict(type="PDSE", force=True), - dict(type="PDSE", force=False), - dict(type="PDSE", force=True), - dict(type="PDSE", force=False), - dict(type="PDSE", force=True) + dict(type="seq", force=False), + dict(type="seq", force=True), + dict(type="pds", force=False), + dict(type="pds", force=True), + dict(type="pdse", force=False), + dict(type="pdse", force=True) ]) def test_copy_data_set_to_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "PDSE" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "PDSE": - hosts.all.zos_data_set(name=src, type="MEMBER") + if args["type"] 
!= "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) - hosts.all.zos_data_set(name=dest, type="MEMBER") + hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) + hosts.all.zos_data_set(name=dest, type="member") copy_result = hosts.all.zos_copy(src=src, dest=dest, force=args["force"], remote_src=True) verify_copy = hosts.all.shell( @@ -2838,7 +2838,7 @@ def test_copy_dir_crlf_endings_to_non_existing_pdse(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_dir = "/tmp/testdir" @@ -2854,7 +2854,7 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): type=src_type, space_primary=5, space_type="M", - record_format="FBA", + record_format="fba", record_length=80, ) @@ -2877,18 +2877,18 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDSE", "PDSE", "PDSE"]) +@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if src_type == "PDSE" else "{0}(TEST)".format(src_data_set) + src = src_data_set if src_type == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=src_type) - if src_type != "PDSE": - hosts.all.zos_data_set(name=src, type="MEMBER") + if src_type != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), @@ -2918,10 
+2918,10 @@ def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(src_type="PDSE", dest_type="PDSE"), - dict(src_type="PDSE", dest_type="PDSE"), - dict(src_type="PDSE", dest_type="PDSE"), - dict(src_type="PDSE", dest_type="PDSE"), + dict(src_type="pds", dest_type="pds"), + dict(src_type="pds", dest_type="pdse"), + dict(src_type="pdse", dest_type="pds"), + dict(src_type="pdse", dest_type="pdse"), ]) def test_copy_pds_to_existing_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -2973,7 +2973,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDSE", + type="pds", space_primary=2, record_format="FB", record_length=80, @@ -2984,7 +2984,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3006,7 +3006,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3018,7 +3018,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3111,7 +3111,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3122,7 +3122,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDSE", + type="pds", space_primary=2, 
record_format="FB", record_length=80, @@ -3132,7 +3132,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3143,7 +3143,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3261,7 +3261,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDSE", + type="pds", space_primary=2, record_format="FB", record_length=80, @@ -3272,7 +3272,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3300,7 +3300,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3312,7 +3312,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3453,7 +3453,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDSE", + type="pds", space_primary=2, record_format="FB", record_length=80, @@ -3464,7 +3464,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3524,7 +3524,7 @@ def 
test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3615,7 +3615,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDSE", + type="pds", space_primary=2, record_format="FB", record_length=80, @@ -3626,7 +3626,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3651,7 +3651,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3663,7 +3663,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3827,7 +3827,7 @@ def test_copy_executables_uss_to_member(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest, state="present", - type="PDSE", + type="pdse", record_format="U", record_length=0, block_size=32760, @@ -3878,7 +3878,7 @@ def test_copy_pds_member_with_system_symbol(ansible_zos_module): hosts.all.zos_data_set( name=dest, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -3914,8 +3914,8 @@ def test_copy_multiple_data_set_members(ansible_zos_module): ds_list = ["{0}({1})".format(src, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="PDSE") - hosts.all.zos_data_set(name=dest, type="PDSE") + hosts.all.zos_data_set(name=src, type="pds") + hosts.all.zos_data_set(name=dest, type="pds") for member in ds_list: hosts.all.shell( @@ -3960,8 +3960,8 @@ def 
test_copy_multiple_data_set_members_in_loop(ansible_zos_module): dest_ds_list = ["{0}({1})".format(dest, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="PDSE") - hosts.all.zos_data_set(name=dest, type="PDSE") + hosts.all.zos_data_set(name=src, type="pds") + hosts.all.zos_data_set(name=dest, type="pds") for src_member in src_ds_list: hosts.all.shell( @@ -3994,7 +3994,7 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("ds_type", ["PDSE", "PDSE"]) +@pytest.mark.parametrize("ds_type", ["pds", "pdse"]) def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): hosts = ansible_zos_module data_set = get_tmp_ds_name() @@ -4032,10 +4032,10 @@ def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(ds_type="PDSE", force=False), - dict(ds_type="PDSE", force=True), - dict(ds_type="PDSE", force=False), - dict(ds_type="PDSE", force=True) + dict(ds_type="pds", force=False), + dict(ds_type="pds", force=True), + dict(ds_type="pdse", force=False), + dict(ds_type="pdse", force=True) ]) def test_copy_member_to_existing_uss_file(ansible_zos_module, args): hosts = ansible_zos_module @@ -4079,7 +4079,7 @@ def test_copy_member_to_existing_uss_file(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4124,7 +4124,7 @@ def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_member_to_uss_dir(ansible_zos_module, src_type): 
hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4170,7 +4170,7 @@ def test_copy_member_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDSE", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4206,10 +4206,10 @@ def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDSE", force=False), - dict(type="PDSE", force=True), - dict(type="PDSE", force=False), - dict(type="PDSE", force=True), + dict(type="pds", force=False), + dict(type="pds", force=True), + dict(type="pdse", force=False), + dict(type="pdse", force=True), ]) def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): hosts = ansible_zos_module @@ -4218,7 +4218,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="present", replace=True) + hosts.all.zos_data_set(name=dest, type="seq", state="present", replace=True) hosts.all.zos_data_set(name=src_ds, type=args["type"], state="present") for data_set in [src, dest]: @@ -4251,7 +4251,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("dest_type", ["PDSE", "PDSE"]) +@pytest.mark.parametrize("dest_type", ["pds", "pdse"]) def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): hosts = ansible_zos_module src = "/etc/profile" @@ -4262,7 +4262,7 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): type=dest_type, space_primary=5, space_type="M", - record_format="FBA", + record_format="fba", record_length=25, ) @@ -4294,10 +4294,10 @@ def 
test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDSE", backup=None), - dict(type="PDSE", backup="USER.TEST.PDS.BACKUP"), - dict(type="PDSE", backup=None), - dict(type="PDSE", backup="USER.TEST.PDSE.BACKUP"), + dict(type="pds", backup=None), + dict(type="pds", backup="USER.TEST.PDS.BACKUP"), + dict(type="pdse", backup=None), + dict(type="pdse", backup="USER.TEST.PDSE.BACKUP"), ]) def test_backup_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -4343,7 +4343,7 @@ def test_backup_pds(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDSE", "PDSE", "PDSE"]) +@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_type): hosts = ansible_zos_module source = get_tmp_ds_name() @@ -4359,8 +4359,8 @@ def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_typ try: hosts.all.zos_data_set(name=source, type=src_type, state='present') - if src_type != "PDSE": - hosts.all.zos_data_set(name=source_member, type="MEMBER", state='present') + if src_type != "seq": + hosts.all.zos_data_set(name=source_member, type="member", state='present') copy_res = hosts.all.zos_copy( src=source, @@ -4631,7 +4631,7 @@ def test_copy_uss_file_to_existing_sequential_data_set_twice_with_tmphlq_option( src_file = "/etc/profile" tmphlq = "TMPHLQ" try: - hosts.all.zos_data_set(name=dest, type="PDSE", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, force=force) copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, backup=True, tmp_hlq=tmphlq, force=force) From 6e08d0730800502958876813bdd7577199dc8d50 Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Fri, 29 Mar 2024 09:06:26 -0600 Subject: [PATCH 05/28] Added Needs Triage 
on bug template (#1314) --- .github/ISSUE_TEMPLATE/bug_issue.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_issue.yml b/.github/ISSUE_TEMPLATE/bug_issue.yml index 2193cb615..9395c85b1 100644 --- a/.github/ISSUE_TEMPLATE/bug_issue.yml +++ b/.github/ISSUE_TEMPLATE/bug_issue.yml @@ -1,7 +1,7 @@ name: Report a bug description: Request that a bug be reviewed. Complete all required fields. title: "[Bug] Enter description" -labels: [Bug] +labels: ["Bug", "Needs Triage" ] assignees: - IBMAnsibleHelper body: From 3b4951042c5ae79587e66e52dae2fa27dc922e1a Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Fri, 29 Mar 2024 15:43:19 -0600 Subject: [PATCH 06/28] Add galaxy importer into ac as a command and create a GitHub action (#1305) * Create bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Added changelog action * Update changelog.yml * Create close-stale-issues * Update close-stale-issues Quite el workflow dispatch * Create bandit2.yml * Update bandit2.yml * Update zos_copy.py * Update zos_copy.py Me equivoque * Create ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Create ac_changelog.yml * Delete .github/workflows/ac_changelog.yml * Create ac_changelog.yml * Update ac_changelog.yml * Add galaxy importer to ac and create workflow with the ac command for it * Delete a jump of line * Create ac-galaxy-importer.yml * Rename action * Rename job * Update ac-galaxy-importer.yml * Fix * Fix * Rename ac-galaxy-importer to ac-galaxy-importer.yml * Acomodate function documentation in ac * Delete invasive files * Added line * Update ac * Update ac * Update ac --------- Co-authored-by: Fernando Flores --- .github/workflows/ac-galaxy-importer.yml | 40 ++++++++++++++++++++++++ ac | 18 
+++++++++++ 2 files changed, 58 insertions(+) create mode 100644 .github/workflows/ac-galaxy-importer.yml diff --git a/.github/workflows/ac-galaxy-importer.yml b/.github/workflows/ac-galaxy-importer.yml new file mode 100644 index 000000000..271f01c22 --- /dev/null +++ b/.github/workflows/ac-galaxy-importer.yml @@ -0,0 +1,40 @@ +name: AC Galaxy Importer + +on: + pull_request: + branches: + - dev + - staging* + +jobs: + galaxy-importer: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.11 + + - name: Set up venv + run: | + python -m pip install --upgrade pip + pip install virtualenv + mkdir venv + virtualenv venv/venv-2.16 + + - name: Install dependencies + run: | + source venv/venv-2.16/bin/activate + python -m pip install --upgrade pip + pip install ansible + pip install ansible-importer + pip install galaxy-importer + + - name: Run ac-galaxy-importer + run: | + source venv/venv-2.16/bin/activate + ./ac --ac-galaxy-importer diff --git a/ac b/ac index bb307f4a6..9aee6a02d 100755 --- a/ac +++ b/ac @@ -242,6 +242,18 @@ ac_build(){ } # ------------------------------------------------------------------------------ +# Run galaxy importer on collection. +# ------------------------------------------------------------------------------ +#->ac-galaxy-importer: +## Build current branch and run galaxy importer on collection. +## Usage: ac [--ac-galaxy-importer] +## Example: +## $ ac --ac-galaxy-importer +ac_galaxy_importer(){ + message "Running Galaxy Importer" + . 
$VENV_BIN/activate && collection_name=$($VENV_BIN/ansible-galaxy collection build --force | awk -F/ '{print $NF}') && python -m galaxy_importer.main $collection_name +} + # Run a changelog lint locally # ------------------------------------------------------------------------------ #->ac-changelog: @@ -679,6 +691,10 @@ while true; do ensure_managed_venv_exists $1 option_submitted="--ac-build" ;; + --ac-galaxy-importer) # Command + ensure_managed_venv_exists $1 + option_submitted="--ac-galaxy-importer" + ;; --ac-changelog) # Command ensure_managed_venv_exists $1 option_submitted="--ac-changelog" @@ -835,6 +851,8 @@ if [ "$option_submitted" ] && [ "$option_submitted" = "--ac-bandit" ] ; then ac_bandit $level elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-build" ] ; then ac_build +elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-galaxy-importer" ] ; then + ac_galaxy_importer elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-changelog" ] ; then ac_changelog $command elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-install" ] ; then From 5788acdc7a16895407189603aee0c7ab965352ec Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Fri, 29 Mar 2024 15:45:03 -0600 Subject: [PATCH 07/28] Create bandit github action using the ac command (#1310) --- .github/workflows/ac-bandit.yml | 38 +++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 .github/workflows/ac-bandit.yml diff --git a/.github/workflows/ac-bandit.yml b/.github/workflows/ac-bandit.yml new file mode 100644 index 000000000..288fb92b1 --- /dev/null +++ b/.github/workflows/ac-bandit.yml @@ -0,0 +1,38 @@ +name: AC Bandit + +on: + pull_request: + branches: + - dev + - staging* + +jobs: + bandit: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.11 + + - name: Set up venv + run: | + python -m pip install --upgrade 
pip + pip install virtualenv + mkdir venv + virtualenv venv/venv-2.16 + + - name: Install dependencies + run: | + source venv/venv-2.16/bin/activate + python -m pip install --upgrade pip + pip install bandit + + - name: Run ac-bandit + run: | + source venv/venv-2.16/bin/activate + ./ac --ac-bandit --level l From f7e9c1bc3f27291009c5387e597e39c71405b7eb Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Fri, 29 Mar 2024 15:57:54 -0600 Subject: [PATCH 08/28] [Enabler] [zos_copy] Fix sanity issues and remove ignore files (#1307) * Removed localchartset * Fixed sanity * Updated to encoding * Updated encoding parser * Fixed encoding to null when remote_src=true * Updated condition to set encoding to none * removed size parameter * Changed src * Added full local src * Corrected base name for temp_path * Fixed pep8 issue * Changed temp_path to src logic * Added src to temp_path * Added module fail * Replaced temp name generation * Placed temporary file into tmp folder * Removing temp_path * Added latest temp path changes * Fixed lock check issue * Removed temp_path * Removed temp path * Removed is_something vasrs * Fixed comment * Added latest zos_copy_changes * Removed print statements * Removed ingore entry * removed entries * Corrected case sensitivity in tests * Fixed lowercase * Modified docs * Added changelog --- .../fragments/1307-update-sanity-zos_copy.yml | 10 + plugins/action/zos_copy.py | 61 +++-- plugins/modules/zos_copy.py | 224 ++++++++---------- .../functional/modules/test_zos_copy_func.py | 200 ++++++++-------- tests/sanity/ignore-2.14.txt | 2 - tests/sanity/ignore-2.15.txt | 2 - tests/sanity/ignore-2.16.txt | 2 - 7 files changed, 241 insertions(+), 260 deletions(-) create mode 100644 changelogs/fragments/1307-update-sanity-zos_copy.yml diff --git a/changelogs/fragments/1307-update-sanity-zos_copy.yml b/changelogs/fragments/1307-update-sanity-zos_copy.yml new file mode 100644 index 000000000..858f0b64c --- /dev/null +++ 
b/changelogs/fragments/1307-update-sanity-zos_copy.yml @@ -0,0 +1,10 @@ +minor_changes: + - zos_copy - Documented `group` and `owner` options. + (https://github.com/ansible-collections/ibm_zos_core/pull/1307). + +trivial: + - zos_copy - Removed many of the variables that were passed from the + action plugin to the module, reimplementing the logic inside the + module instead. Removed the use of temp_path variable inside zos_copy + in favor of using remote_src to deal with files copied to remote. + (https://github.com/ansible-collections/ibm_zos_core/pull/1307). \ No newline at end of file diff --git a/plugins/action/zos_copy.py b/plugins/action/zos_copy.py index 592126b00..e9c238b87 100644 --- a/plugins/action/zos_copy.py +++ b/plugins/action/zos_copy.py @@ -29,11 +29,10 @@ from ansible import cli from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.data_set import ( - is_member, - is_data_set + is_member ) -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import encode, validation +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import encode from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import template @@ -69,8 +68,8 @@ def run(self, tmp=None, task_vars=None): owner = task_args.get("owner", None) group = task_args.get("group", None) - is_pds = is_src_dir = False - temp_path = is_uss = is_mvs_dest = src_member = None + is_src_dir = False + temp_path = is_uss = None if dest: if not isinstance(dest, string_types): @@ -78,7 +77,6 @@ def run(self, tmp=None, task_vars=None): return self._exit_action(result, msg, failed=True) else: is_uss = "/" in dest - is_mvs_dest = is_data_set(dest) else: msg = "Destination is required" return self._exit_action(result, msg, failed=True) @@ -96,13 +94,11 @@ def run(self, tmp=None, task_vars=None): msg = "'src' or 'dest' must not be empty" return self._exit_action(result, msg, failed=True) else: - src_member = is_member(src) if not remote_src: if src.startswith('~'): src = 
os.path.expanduser(src) src = os.path.realpath(src) is_src_dir = os.path.isdir(src) - is_pds = is_src_dir and is_mvs_dest if not src and not content: msg = "'src' or 'content' is required" @@ -196,11 +192,6 @@ def run(self, tmp=None, task_vars=None): src = rendered_dir - task_args["size"] = sum( - os.stat(os.path.join(validation.validate_safe_path(path), validation.validate_safe_path(f))).st_size - for path, dirs, files in os.walk(src) - for f in files - ) else: if mode == "preserve": task_args["mode"] = "0{0:o}".format( @@ -231,7 +222,6 @@ def run(self, tmp=None, task_vars=None): src = rendered_file - task_args["size"] = os.stat(src).st_size display.vvv(u"ibm_zos_copy calculated size: {0}".format(os.stat(src).st_size), host=self._play_context.remote_addr) transfer_res = self._copy_to_remote( src, is_dir=is_src_dir, ignore_stderr=ignore_sftp_stderr @@ -242,15 +232,31 @@ def run(self, tmp=None, task_vars=None): return transfer_res display.vvv(u"ibm_zos_copy temp path: {0}".format(transfer_res.get("temp_path")), host=self._play_context.remote_addr) + if not encoding: + encoding = { + "from": encode.Defaults.get_default_system_charset(), + } + + """ + We format temp_path correctly to pass it as src option to the module, + we keep the original source to return to the user and avoid confusion + by returning the temp_path created. 
+ """ + original_src = task_args.get("src") + if original_src: + if not remote_src: + base_name = os.path.basename(original_src) + if original_src.endswith("/"): + src = temp_path + "/" + else: + src = temp_path + else: + src = temp_path + task_args.update( dict( - is_uss=is_uss, - is_pds=is_pds, - is_src_dir=is_src_dir, - src_member=src_member, - temp_path=temp_path, - is_mvs_dest=is_mvs_dest, - local_charset=encode.Defaults.get_default_system_charset() + src=src, + encoding=encoding, ) ) copy_res = self._execute_module( @@ -284,17 +290,20 @@ def run(self, tmp=None, task_vars=None): self._remote_cleanup(dest, copy_res.get("dest_exists"), task_vars) return result - return _update_result(is_binary, copy_res, self._task.args) + return _update_result(is_binary, copy_res, self._task.args, original_src) def _copy_to_remote(self, src, is_dir=False, ignore_stderr=False): """Copy a file or directory to the remote z/OS system """ - temp_path = "/{0}/{1}".format(gettempprefix(), _create_temp_path_name()) + temp_path = "/{0}/{1}/{2}".format(gettempprefix(), _create_temp_path_name(), os.path.basename(src)) + self._connection.exec_command("mkdir -p {0}".format(os.path.dirname(temp_path))) _src = src.replace("#", "\\#") _sftp_action = 'put' + full_temp_path = temp_path if is_dir: src = src.rstrip("/") if src.endswith("/") else src + temp_path = os.path.dirname(temp_path) base = os.path.basename(src) self._connection.exec_command("mkdir -p {0}/{1}".format(temp_path, base)) _sftp_action += ' -r' # add '-r` to clone the source trees @@ -379,7 +388,7 @@ def _copy_to_remote(self, src, is_dir=False, ignore_stderr=False): display.vvv(u"ibm_zos_copy SSH transfer method restored to {0}".format(user_ssh_transfer_method), host=self._play_context.remote_addr) is_ssh_transfer_method_updated = False - return dict(temp_path=temp_path) + return dict(temp_path=full_temp_path) def _remote_cleanup(self, dest, dest_exists, task_vars): """Remove all files or data sets pointed to by 'dest' on the 
remote @@ -417,7 +426,7 @@ def _exit_action(self, result, msg, failed=False): return result -def _update_result(is_binary, copy_res, original_args): +def _update_result(is_binary, copy_res, original_args, original_src): """ Helper function to update output result with the provided values """ ds_type = copy_res.get("ds_type") src = copy_res.get("src") @@ -431,7 +440,7 @@ def _update_result(is_binary, copy_res, original_args): invocation=dict(module_args=original_args), ) if src: - updated_result["src"] = src + updated_result["src"] = original_src if note: updated_result["note"] = note if backup_name: diff --git a/plugins/modules/zos_copy.py b/plugins/modules/zos_copy.py index a854d1cae..6991c4d81 100644 --- a/plugins/modules/zos_copy.py +++ b/plugins/modules/zos_copy.py @@ -143,7 +143,7 @@ to: description: - The encoding to be converted to - required: true + required: false type: str tmp_hlq: description: @@ -243,6 +243,15 @@ type: bool default: true required: false + group: + description: + - Name of the group that will own the file system objects. + - When left unspecified, it uses the current group of the current user + unless you are root, in which case it can preserve the previous + ownership. + - This option is only applicable if C(dest) is USS, otherwise ignored. + type: str + required: false mode: description: - The permission of the destination file or directory. @@ -261,6 +270,15 @@ the source file. type: str required: false + owner: + description: + - Name of the user that should own the filesystem object, as would be + passed to the chown command. + - When left unspecified, it uses the current user unless you are root, + in which case it can preserve the previous ownership. + - This option is only applicable if C(dest) is USS, otherwise ignored. + type: str + required: false remote_src: description: - If set to C(false), the module searches for C(src) at the local machine. 
@@ -803,37 +821,35 @@ """ -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.import_handler import ( - ZOAUImportError, -) -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.mvs_cmd import ( - idcams -) -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import ( - better_arg_parser, data_set, encode, backup, copy, validation, -) -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.ansible_module import ( - AnsibleModuleHelper, -) -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.data_set import ( - is_member -) -from ansible.module_utils._text import to_bytes, to_native -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import PY3 -from re import IGNORECASE -from hashlib import sha256 import glob +import math +import os import shutil import stat -import math import tempfile -import os import traceback +from hashlib import sha256 +from re import IGNORECASE + +from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import PY3 +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import ( + backup, better_arg_parser, copy, data_set, encode, validation) +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.ansible_module import \ + AnsibleModuleHelper +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.data_set import ( + is_member, + is_data_set +) +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.import_handler import \ + ZOAUImportError +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.mvs_cmd import \ + idcams if PY3: - from re import fullmatch import pathlib + from re import fullmatch else: from re import match as fullmatch @@ -892,7 +908,6 @@ def run_command(self, cmd, **kwargs): def copy_to_seq( self, src, - temp_path, conv_path, dest, src_type @@ -904,13 +919,11 @@ def copy_to_seq( Arguments: src {str} -- Path to USS 
file or data set name - temp_path {str} -- Path to the location where the control node - transferred data to conv_path {str} -- Path to the converted source file dest {str} -- Name of destination data set src_type {str} -- Type of the source """ - new_src = conv_path or temp_path or src + new_src = conv_path or src copy_args = dict() copy_args["options"] = "" @@ -1031,15 +1044,15 @@ def copy_tree(self, src_dir, dest_dir, dirs_exist_ok=False): entries = list(itr) return self._copy_tree(entries, src_dir, dest_dir, dirs_exist_ok=dirs_exist_ok) - def convert_encoding(self, src, temp_path, encoding): + def convert_encoding(self, src, encoding, remote_src): """Convert encoding for given src Arguments: src {str} -- Path to the USS source file or directory - temp_path {str} -- Path to the location where the control node - transferred data to encoding {dict} -- Charsets that the source is to be converted from and to + remote_src {bool} -- Whether the file was already on the remote + node or not. 
Raises: CopyOperationError -- When the encoding of a USS file is not @@ -1051,19 +1064,10 @@ def convert_encoding(self, src, temp_path, encoding): from_code_set = encoding.get("from") to_code_set = encoding.get("to") enc_utils = encode.EncodeUtils() - new_src = temp_path or src - + new_src = src if os.path.isdir(new_src): - if temp_path: - if src.endswith("/"): - new_src = "{0}/{1}".format( - temp_path, os.path.basename(os.path.dirname(src)) - ) - else: - new_src = "{0}/{1}".format(temp_path, - os.path.basename(src)) try: - if not temp_path: + if remote_src: temp_dir = tempfile.mkdtemp() shutil.copytree(new_src, temp_dir, dirs_exist_ok=True) new_src = temp_dir @@ -1081,7 +1085,7 @@ def convert_encoding(self, src, temp_path, encoding): raise CopyOperationError(msg=str(err)) else: try: - if not temp_path: + if remote_src: fd, temp_src = tempfile.mkstemp() os.close(fd) shutil.copy(new_src, temp_src) @@ -1270,24 +1274,23 @@ def copy_to_uss( src, dest, conv_path, - temp_path, src_ds_type, src_member, member_name, - force + force, + content_copy, ): """Copy a file or data set to a USS location Arguments: src {str} -- The USS source dest {str} -- Destination file or directory on USS - temp_path {str} -- Path to the location where the control node - transferred data to conv_path {str} -- Path to the converted source file or directory src_ds_type {str} -- Type of source src_member {bool} -- Whether src is a data set member member_name {str} -- The name of the source data set member force {bool} -- Whether to copy files to an already existing directory + content_copy {bool} -- Whether copy is using content option or not. 
Returns: {str} -- Destination where the file was copied to @@ -1322,11 +1325,11 @@ def copy_to_uss( if "File exists" not in err: raise CopyOperationError(msg=to_native(err)) - if os.path.isfile(temp_path or conv_path or src): - dest = self._copy_to_file(src, dest, conv_path, temp_path) + if os.path.isfile(conv_path or src): + dest = self._copy_to_file(src, dest, content_copy, conv_path) changed_files = None else: - dest, changed_files = self._copy_to_dir(src, dest, conv_path, temp_path, force) + dest, changed_files = self._copy_to_dir(src, dest, conv_path, force) if self.common_file_args is not None: mode = self.common_file_args.get("mode") @@ -1347,14 +1350,13 @@ def copy_to_uss( self.module.set_owner_if_different(dest, owner, False) return dest - def _copy_to_file(self, src, dest, conv_path, temp_path): + def _copy_to_file(self, src, dest, content_copy, conv_path): """Helper function to copy a USS src to USS dest. Arguments: src {str} -- USS source file path dest {str} -- USS dest file path - temp_path {str} -- Path to the location where the control node - transferred data to + content_copy {bool} -- Whether copy is using content option or not. conv_path {str} -- Path to the converted source file or directory Raises: @@ -1363,11 +1365,10 @@ def _copy_to_file(self, src, dest, conv_path, temp_path): Returns: {str} -- Destination where the file was copied to """ - src_path = os.path.basename(src) if src else "inline_copy" + src_path = os.path.basename(src) if not content_copy else "inline_copy" if os.path.isdir(dest): dest = os.path.join(validation.validate_safe_path(dest), validation.validate_safe_path(src_path)) - - new_src = temp_path or conv_path or src + new_src = conv_path or src try: if self.is_binary: copy.copy_uss2uss_binary(new_src, dest) @@ -1402,7 +1403,6 @@ def _copy_to_dir( src_dir, dest_dir, conv_path, - temp_path, force ): """Helper function to copy a USS directory to another USS directory. 
@@ -1413,8 +1413,6 @@ def _copy_to_dir( src_dir {str} -- USS source directory dest_dir {str} -- USS dest directory conv_path {str} -- Path to the converted source directory - temp_path {str} -- Path to the location where the control node - transferred data to force {bool} -- Whether to copy files to an already existing directory Raises: @@ -1426,14 +1424,7 @@ def _copy_to_dir( that got copied. """ copy_directory = True if not src_dir.endswith("/") else False - - if temp_path: - temp_path = "{0}/{1}".format( - temp_path, - os.path.basename(os.path.normpath(src_dir)) - ) - - new_src_dir = temp_path or conv_path or src_dir + new_src_dir = conv_path or src_dir new_src_dir = os.path.normpath(new_src_dir) dest = dest_dir changed_files, original_permissions = self._get_changed_files(new_src_dir, dest_dir, copy_directory) @@ -1661,7 +1652,6 @@ def __init__( def copy_to_pdse( self, src, - temp_path, conv_path, dest, src_ds_type, @@ -1676,8 +1666,6 @@ def copy_to_pdse( Arguments: src {str} -- Path to USS file/directory or data set name. - temp_path {str} -- Path to the location where the control node - transferred data to. conv_path {str} -- Path to the converted source file/directory. dest {str} -- Name of destination data set. src_ds_type {str} -- The type of source. @@ -1685,7 +1673,7 @@ def copy_to_pdse( dest_member {str, optional} -- Name of destination member in data set. encoding {dict, optional} -- Dictionary with encoding options. 
""" - new_src = conv_path or temp_path or src + new_src = conv_path or src src_members = [] dest_members = [] @@ -2660,15 +2648,10 @@ def run_module(module, arg_def): owner = module.params.get('owner') encoding = module.params.get('encoding') volume = module.params.get('volume') - is_uss = module.params.get('is_uss') - is_pds = module.params.get('is_pds') - is_src_dir = module.params.get('is_src_dir') - is_mvs_dest = module.params.get('is_mvs_dest') - temp_path = module.params.get('temp_path') - src_member = module.params.get('src_member') tmphlq = module.params.get('tmp_hlq') force = module.params.get('force') force_lock = module.params.get('force_lock') + content = module.params.get('content') dest_data_set = module.params.get('dest_data_set') if dest_data_set: @@ -2676,6 +2659,13 @@ def run_module(module, arg_def): dest_data_set["volumes"] = [volume] copy_member = is_member(dest) + # This section we initialize different variables + # that we used to pass from the action plugin. + is_src_dir = os.path.isdir(src) + is_uss = "/" in dest + is_mvs_dest = is_data_set(dest) + is_pds = is_src_dir and is_mvs_dest + src_member = is_member(src) # ******************************************************************** # When copying to and from a data set member, 'dest' or 'src' will be @@ -2722,18 +2712,17 @@ def run_module(module, arg_def): # data sets with record format 'FBA' or 'VBA'. src_has_asa_chars = dest_has_asa_chars = False try: - # If temp_path, the plugin has copied a file from the controller to USS. - if temp_path or "/" in src: + if "/" in src: src_ds_type = "USS" - if remote_src and os.path.isdir(src): + if os.path.isdir(src): is_src_dir = True # When the destination is a dataset, we'll normalize the source # file to UTF-8 for the record length computation as Python # generally uses UTF-8 as the default encoding. 
if not is_binary and not is_uss and not executable: - new_src = temp_path or src + new_src = src new_src = os.path.normpath(new_src) # Normalizing encoding when src is a USS file (only). encode_utils = encode.EncodeUtils() @@ -2790,9 +2779,8 @@ def run_module(module, arg_def): if is_uss: dest_ds_type = "USS" if src_ds_type == "USS" and not is_src_dir and (dest.endswith("/") or os.path.isdir(dest)): - src_basename = os.path.basename(src) if src else "inline_copy" + src_basename = os.path.basename(src) if not content else "inline_copy" dest = os.path.normpath("{0}/{1}".format(dest, src_basename)) - if dest.startswith("//"): dest = dest.replace("//", "/") @@ -2841,12 +2829,7 @@ def run_module(module, arg_def): if copy_member: dest_member_exists = dest_exists and data_set.DataSet.data_set_member_exists(dest) elif src_ds_type == "USS": - if temp_path: - root_dir = "{0}/{1}".format(temp_path, os.path.basename(os.path.normpath(src))) - root_dir = os.path.normpath(root_dir) - else: - root_dir = src - + root_dir = src dest_member_exists = dest_exists and data_set.DataSet.files_in_data_set_members(root_dir, dest) elif src_ds_type in data_set.DataSet.MVS_PARTITIONED: dest_member_exists = dest_exists and data_set.DataSet.data_set_shared_members(src, dest) @@ -2987,17 +2970,13 @@ def run_module(module, arg_def): # original one. This change applies only to the # allocate_destination_data_set call. 
if converted_src: - if remote_src: - original_src = src - src = converted_src - else: - original_temp = temp_path - temp_path = converted_src + original_src = src + src = converted_src try: if not is_uss: res_args["changed"], res_args["dest_data_set_attrs"] = allocate_destination_data_set( - temp_path or src, + src, dest_name, src_ds_type, dest_ds_type, dest_exists, @@ -3010,20 +2989,14 @@ def run_module(module, arg_def): ) except Exception as err: if converted_src: - if remote_src: - src = original_src - else: - temp_path = original_temp + src = original_src module.fail_json( msg="Unable to allocate destination data set: {0}".format(str(err)), dest_exists=dest_exists ) if converted_src: - if remote_src: - src = original_src - else: - temp_path = original_temp + src = original_src # ******************************************************************** # Encoding conversion is only valid if the source is a local file, @@ -3044,7 +3017,7 @@ def run_module(module, arg_def): # if is_mvs_dest: # encoding["to"] = encode.Defaults.DEFAULT_EBCDIC_MVS_CHARSET - conv_path = copy_handler.convert_encoding(src, temp_path, encoding) + conv_path = copy_handler.convert_encoding(src, encoding, remote_src) # ------------------------------- o ----------------------------------- # Copy to USS file or directory @@ -3068,17 +3041,17 @@ def run_module(module, arg_def): src, dest, conv_path, - temp_path, src_ds_type, src_member, member_name, - force + force, + bool(content) ) res_args['size'] = os.stat(dest).st_size remote_checksum = dest_checksum = None try: - remote_checksum = get_file_checksum(temp_path or src) + remote_checksum = get_file_checksum(src) dest_checksum = get_file_checksum(dest) if validate: @@ -3100,12 +3073,11 @@ def run_module(module, arg_def): elif dest_ds_type in data_set.DataSet.MVS_SEQ: # TODO: check how ASA behaves with this if src_ds_type == "USS" and not is_binary: - new_src = conv_path or temp_path or src + new_src = conv_path or src conv_path = 
normalize_line_endings(new_src, encoding) copy_handler.copy_to_seq( src, - temp_path, conv_path, dest, src_ds_type @@ -3117,8 +3089,6 @@ def run_module(module, arg_def): # Copy to PDS/PDSE # --------------------------------------------------------------------- elif dest_ds_type in data_set.DataSet.MVS_PARTITIONED or dest_ds_type == "LIBRARY": - if not remote_src and not copy_member and os.path.isdir(temp_path): - temp_path = os.path.join(validation.validate_safe_path(temp_path), validation.validate_safe_path(os.path.basename(src))) pdse_copy_handler = PDSECopyHandler( module, @@ -3132,7 +3102,6 @@ def run_module(module, arg_def): pdse_copy_handler.copy_to_pdse( src, - temp_path, conv_path, dest_name, src_ds_type, @@ -3163,7 +3132,7 @@ def run_module(module, arg_def): ) ) - return res_args, temp_path, conv_path + return res_args, conv_path def main(): @@ -3185,7 +3154,7 @@ def main(): ), "to": dict( type='str', - required=True, + required=False, ) } ), @@ -3255,14 +3224,6 @@ def main(): auto_reload=dict(type='bool', default=False), ) ), - is_uss=dict(type='bool'), - is_pds=dict(type='bool'), - is_src_dir=dict(type='bool'), - is_mvs_dest=dict(type='bool'), - size=dict(type='int'), - temp_path=dict(type='str'), - src_member=dict(type='bool'), - local_charset=dict(type='str'), force=dict(type='bool', default=False), force_lock=dict(type='bool', default=False), mode=dict(type='str', required=False), @@ -3333,15 +3294,16 @@ def main(): ) if ( - not module.params.get("encoding") + not module.params.get("encoding").get("to") and not module.params.get("remote_src") and not module.params.get("is_binary") and not module.params.get("executable") ): - module.params["encoding"] = { - "from": module.params.get("local_charset"), - "to": encode.Defaults.get_default_system_charset(), - } + module.params["encoding"]["to"] = encode.Defaults.get_default_system_charset() + elif ( + not module.params.get("encoding").get("to") + ): + module.params["encoding"] = None if 
module.params.get("encoding"): module.params.update( @@ -3357,15 +3319,15 @@ def main(): ) ) - res_args = temp_path = conv_path = None + res_args = conv_path = None try: - res_args, temp_path, conv_path = run_module(module, arg_def) + res_args, conv_path = run_module(module, arg_def) module.exit_json(**res_args) except CopyOperationError as err: cleanup([]) module.fail_json(**(err.json_args)) finally: - cleanup([temp_path, conv_path]) + cleanup([conv_path]) class EncodingConversionError(Exception): diff --git a/tests/functional/modules/test_zos_copy_func.py b/tests/functional/modules/test_zos_copy_func.py index cf7f1494b..6e6a9a073 100644 --- a/tests/functional/modules/test_zos_copy_func.py +++ b/tests/functional/modules/test_zos_copy_func.py @@ -794,6 +794,12 @@ def test_copy_subdirs_folders_and_validate_recursive_encoding_local(ansible_zos_ @pytest.mark.uss @pytest.mark.parametrize("copy_directory", [False, True]) def test_copy_local_dir_to_non_existing_dir(ansible_zos_module, copy_directory): + """ + This test evaluates the behavior of testing copy of a directory when src ends + with '/' versus only the dir name. Expectation is that when only dir name is provided + that directory is also created on the remote, when directory name ends with '/' + this means we only copy that directory contents without creating it on the remote. 
+ """ hosts = ansible_zos_module dest_path = "/tmp/new_dir" @@ -1684,7 +1690,7 @@ def test_copy_seq_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="seq", + type="SEQ", replace=True ) @@ -1733,7 +1739,7 @@ def test_copy_seq_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="seq", + type="SEQ", replace=True ) @@ -1784,7 +1790,7 @@ def test_copy_partitioned_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="pdse", + type="PDSE", replace=True ) @@ -1834,7 +1840,7 @@ def test_copy_partitioned_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="pdse", + type="PDSE", replace=True ) @@ -1884,7 +1890,7 @@ def test_copy_asa_data_set_to_text_file(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="seq", + type="SEQ", record_format="FBA", record_length=80, block_size=27920, @@ -1977,8 +1983,8 @@ def test_copy_dest_lock(ansible_zos_module, ds_type): hosts.all.zos_data_set(name=data_set_1, state="present", type=ds_type, replace=True) hosts.all.zos_data_set(name=data_set_2, state="present", type=ds_type, replace=True) if ds_type == "PDS" or ds_type == "PDSE": - hosts.all.zos_data_set(name=src_data_set, state="present", type="member", replace=True) - hosts.all.zos_data_set(name=dest_data_set, state="present", type="member", replace=True) + hosts.all.zos_data_set(name=src_data_set, state="present", type="MEMBER", replace=True) + hosts.all.zos_data_set(name=dest_data_set, state="present", type="MEMBER", replace=True) # copy text_in source hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(DUMMY_DATA, src_data_set)) # copy/compile c program and copy jcl to hold data set lock for n seconds in background(&) @@ -2266,7 +2272,7 @@ def test_copy_file_to_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - 
hosts.all.zos_data_set(name=dest, type="seq", state="present") + hosts.all.zos_data_set(name=dest, type="SEQ", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2294,7 +2300,7 @@ def test_copy_file_to_non_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="absent") + hosts.all.zos_data_set(name=dest, type="SEQ", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2432,7 +2438,7 @@ def test_copy_ps_to_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="present") + hosts.all.zos_data_set(name=dest, type="SEQ", state="present") copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) verify_copy = hosts.all.shell( @@ -2458,7 +2464,7 @@ def test_copy_ps_to_non_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="absent") + hosts.all.zos_data_set(name=dest, type="SEQ", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2489,7 +2495,7 @@ def test_copy_ps_to_non_empty_ps_with_special_chars(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="absent") + hosts.all.zos_data_set(name=dest, type="SEQ", state="absent") hosts.all.zos_copy(content=DUMMY_DATA_SPECIAL_CHARS, dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2520,7 +2526,7 @@ def test_backup_sequential_data_set(ansible_zos_module, backup): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", 
state="present") + hosts.all.zos_data_set(name=dest, type="SEQ", state="present") if backup: copy_res = hosts.all.zos_copy(src=src, dest=dest, force=True, backup=True, backup_name=backup) @@ -2565,10 +2571,10 @@ def test_copy_file_to_non_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="pdse", + type="PDSE", space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=80, replace=True ) @@ -2611,14 +2617,14 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="pdse", + type="PDSE", space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=80, replace=True ) - hosts.all.zos_data_set(name=dest, type="member", state="present") + hosts.all.zos_data_set(name=dest, type="MEMBER", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, force=src["force"], remote_src=src["is_remote"]) @@ -2647,31 +2653,31 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="seq", is_binary=False), - dict(type="seq", is_binary=True), - dict(type="pds", is_binary=False), - dict(type="pds", is_binary=True), - dict(type="pdse", is_binary=False), - dict(type="pdse", is_binary=True) + dict(type="SEQ", is_binary=False), + dict(type="SEQ", is_binary=True), + dict(type="PDS", is_binary=False), + dict(type="PDS", is_binary=True), + dict(type="PDSE", is_binary=False), + dict(type="PDSE", is_binary=True) ]) def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "SEQ" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: 
hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "seq": - hosts.all.zos_data_set(name=src, type="member") + if args["type"] != "SEQ": + hosts.all.zos_data_set(name=src, type="MEMBER") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) + hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) copy_result = hosts.all.zos_copy(src=src, dest=dest, is_binary=args["is_binary"], remote_src=True) verify_copy = hosts.all.shell( @@ -2694,32 +2700,32 @@ def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="seq", force=False), - dict(type="seq", force=True), - dict(type="pds", force=False), - dict(type="pds", force=True), - dict(type="pdse", force=False), - dict(type="pdse", force=True) + dict(type="SEQ", force=False), + dict(type="SEQ", force=True), + dict(type="PDS", force=False), + dict(type="PDS", force=True), + dict(type="PDSE", force=False), + dict(type="PDSE", force=True) ]) def test_copy_data_set_to_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "SEQ" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "seq": - hosts.all.zos_data_set(name=src, type="member") + if args["type"] != "SEQ": + hosts.all.zos_data_set(name=src, type="MEMBER") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) - hosts.all.zos_data_set(name=dest, type="member") + 
hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) + hosts.all.zos_data_set(name=dest, type="MEMBER") copy_result = hosts.all.zos_copy(src=src, dest=dest, force=args["force"], remote_src=True) verify_copy = hosts.all.shell( @@ -2838,7 +2844,7 @@ def test_copy_dir_crlf_endings_to_non_existing_pdse(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_dir = "/tmp/testdir" @@ -2854,7 +2860,7 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): type=src_type, space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=80, ) @@ -2877,18 +2883,18 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["SEQ", "PDS", "PDSE"]) def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if src_type == "seq" else "{0}(TEST)".format(src_data_set) + src = src_data_set if src_type == "SEQ" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=src_type) - if src_type != "seq": - hosts.all.zos_data_set(name=src, type="member") + if src_type != "SEQ": + hosts.all.zos_data_set(name=src, type="MEMBER") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), @@ -2918,10 +2924,10 @@ def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(src_type="pds", dest_type="pds"), - dict(src_type="pds", dest_type="pdse"), - dict(src_type="pdse", dest_type="pds"), - dict(src_type="pdse", 
dest_type="pdse"), + dict(src_type="PDS", dest_type="PDS"), + dict(src_type="PDS", dest_type="PDSE"), + dict(src_type="PDSE", dest_type="PDS"), + dict(src_type="PDSE", dest_type="PDSE"), ]) def test_copy_pds_to_existing_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -2973,7 +2979,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDS", space_primary=2, record_format="FB", record_length=80, @@ -2984,7 +2990,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3006,7 +3012,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3018,7 +3024,7 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3111,7 +3117,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3122,7 +3128,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDS", space_primary=2, record_format="FB", record_length=80, @@ -3132,7 +3138,7 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3143,7 +3149,7 @@ def 
test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3261,7 +3267,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDS", space_primary=2, record_format="FB", record_length=80, @@ -3272,7 +3278,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3300,7 +3306,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3312,7 +3318,7 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3453,7 +3459,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDS", space_primary=2, record_format="FB", record_length=80, @@ -3464,7 +3470,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3524,7 +3530,7 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3615,7 +3621,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): 
hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="pds", + type="PDS", space_primary=2, record_format="FB", record_length=80, @@ -3626,7 +3632,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3651,7 +3657,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3663,7 +3669,7 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3827,7 +3833,7 @@ def test_copy_executables_uss_to_member(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest, state="present", - type="pdse", + type="PDSE", record_format="U", record_length=0, block_size=32760, @@ -3878,7 +3884,7 @@ def test_copy_pds_member_with_system_symbol(ansible_zos_module): hosts.all.zos_data_set( name=dest, state="present", - type="pdse", + type="PDSE", replace=True ) @@ -3914,8 +3920,8 @@ def test_copy_multiple_data_set_members(ansible_zos_module): ds_list = ["{0}({1})".format(src, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="pds") - hosts.all.zos_data_set(name=dest, type="pds") + hosts.all.zos_data_set(name=src, type="PDS") + hosts.all.zos_data_set(name=dest, type="PDS") for member in ds_list: hosts.all.shell( @@ -3960,8 +3966,8 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): dest_ds_list = ["{0}({1})".format(dest, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="pds") - hosts.all.zos_data_set(name=dest, type="pds") + hosts.all.zos_data_set(name=src, type="PDS") + hosts.all.zos_data_set(name=dest, 
type="PDS") for src_member in src_ds_list: hosts.all.shell( @@ -3994,7 +4000,7 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("ds_type", ["pds", "pdse"]) +@pytest.mark.parametrize("ds_type", ["PDS", "PDSE"]) def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): hosts = ansible_zos_module data_set = get_tmp_ds_name() @@ -4032,10 +4038,10 @@ def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(ds_type="pds", force=False), - dict(ds_type="pds", force=True), - dict(ds_type="pdse", force=False), - dict(ds_type="pdse", force=True) + dict(ds_type="PDS", force=False), + dict(ds_type="PDS", force=True), + dict(ds_type="PDSE", force=False), + dict(ds_type="PDSE", force=True) ]) def test_copy_member_to_existing_uss_file(ansible_zos_module, args): hosts = ansible_zos_module @@ -4079,7 +4085,7 @@ def test_copy_member_to_existing_uss_file(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4124,7 +4130,7 @@ def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) def test_copy_member_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4170,7 +4176,7 @@ def test_copy_member_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) def 
test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4206,10 +4212,10 @@ def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="pds", force=False), - dict(type="pds", force=True), - dict(type="pdse", force=False), - dict(type="pdse", force=True), + dict(type="PDS", force=False), + dict(type="PDS", force=True), + dict(type="PDSE", force=False), + dict(type="PDSE", force=True), ]) def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): hosts = ansible_zos_module @@ -4218,7 +4224,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="seq", state="present", replace=True) + hosts.all.zos_data_set(name=dest, type="SEQ", state="present", replace=True) hosts.all.zos_data_set(name=src_ds, type=args["type"], state="present") for data_set in [src, dest]: @@ -4251,7 +4257,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("dest_type", ["pds", "pdse"]) +@pytest.mark.parametrize("dest_type", ["PDS", "PDSE"]) def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): hosts = ansible_zos_module src = "/etc/profile" @@ -4262,7 +4268,7 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): type=dest_type, space_primary=5, space_type="M", - record_format="fba", + record_format="FBA", record_length=25, ) @@ -4294,10 +4300,10 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="pds", backup=None), - dict(type="pds", backup="USER.TEST.PDS.BACKUP"), - dict(type="pdse", backup=None), - dict(type="pdse", backup="USER.TEST.PDSE.BACKUP"), + dict(type="PDS", backup=None), 
+ dict(type="PDS", backup="USER.TEST.PDS.BACKUP"), + dict(type="PDSE", backup=None), + dict(type="PDSE", backup="USER.TEST.PDSE.BACKUP"), ]) def test_backup_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -4343,7 +4349,7 @@ def test_backup_pds(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) +@pytest.mark.parametrize("src_type", ["SEQ", "PDS", "PDSE"]) def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_type): hosts = ansible_zos_module source = get_tmp_ds_name() @@ -4359,8 +4365,8 @@ def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_typ try: hosts.all.zos_data_set(name=source, type=src_type, state='present') - if src_type != "seq": - hosts.all.zos_data_set(name=source_member, type="member", state='present') + if src_type != "SEQ": + hosts.all.zos_data_set(name=source_member, type="MEMBER", state='present') copy_res = hosts.all.zos_copy( src=source, @@ -4631,7 +4637,7 @@ def test_copy_uss_file_to_existing_sequential_data_set_twice_with_tmphlq_option( src_file = "/etc/profile" tmphlq = "TMPHLQ" try: - hosts.all.zos_data_set(name=dest, type="seq", state="present") + hosts.all.zos_data_set(name=dest, type="SEQ", state="present") copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, force=force) copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, backup=True, tmp_hlq=tmphlq, force=force) diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 0167d6c81..c04ae2328 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -2,8 +2,6 @@ plugins/modules/zos_apf.py validate-modules:missing-gplv3-license # Licensed und plugins/modules/zos_backup_restore.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_blockinfile.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_copy.py 
validate-modules:missing-gplv3-license # Licensed under Apache 2.0 -plugins/modules/zos_copy.py validate-modules:parameter-type-not-in-doc # Passing args from action plugin -plugins/modules/zos_copy.py validate-modules:undocumented-parameter # Passing args from action plugin plugins/modules/zos_data_set.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_encode.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_fetch.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.15.txt index 0167d6c81..c04ae2328 100644 --- a/tests/sanity/ignore-2.15.txt +++ b/tests/sanity/ignore-2.15.txt @@ -2,8 +2,6 @@ plugins/modules/zos_apf.py validate-modules:missing-gplv3-license # Licensed und plugins/modules/zos_backup_restore.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_blockinfile.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_copy.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 -plugins/modules/zos_copy.py validate-modules:parameter-type-not-in-doc # Passing args from action plugin -plugins/modules/zos_copy.py validate-modules:undocumented-parameter # Passing args from action plugin plugins/modules/zos_data_set.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_encode.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_fetch.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt index 0167d6c81..c04ae2328 100644 --- a/tests/sanity/ignore-2.16.txt +++ b/tests/sanity/ignore-2.16.txt @@ -2,8 +2,6 @@ plugins/modules/zos_apf.py validate-modules:missing-gplv3-license # Licensed und plugins/modules/zos_backup_restore.py validate-modules:missing-gplv3-license # 
Licensed under Apache 2.0 plugins/modules/zos_blockinfile.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_copy.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 -plugins/modules/zos_copy.py validate-modules:parameter-type-not-in-doc # Passing args from action plugin -plugins/modules/zos_copy.py validate-modules:undocumented-parameter # Passing args from action plugin plugins/modules/zos_data_set.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_encode.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 plugins/modules/zos_fetch.py validate-modules:missing-gplv3-license # Licensed under Apache 2.0 From ed26cf81b29f896b477156252a23f9ba9fb645d2 Mon Sep 17 00:00:00 2001 From: Demetri Date: Tue, 2 Apr 2024 09:04:12 -0700 Subject: [PATCH 09/28] Cherry picking 1.9 into dev (#1346) * [v1.9.0] Collaboration 1246 to add typrun support for zos_job_submit (#1283) * Fixes typo in property Signed-off-by: ddimatos * Initial commit for supporting typrun=scan Signed-off-by: ddimatos * Update jobs and zos_job_submit to better support jobs in the input queue Signed-off-by: ddimatos * Update zos_job_submit to remove other typrun scans from regex Signed-off-by: ddimatos * The ret_code msg field should have only had the status in it, not the RC Signed-off-by: ddimatos * Update msg_txt for jobs JCLHOlD, HOLD Signed-off-by: ddimatos * Update test cases with typrun Signed-off-by: ddimatos * Lint updates Signed-off-by: ddimatos * Updates to controll the messages to the ret_code property Signed-off-by: ddimatos * Update wait times as result of the timer fix forced tests to add more time Signed-off-by: ddimatos * Changelog fragment Signed-off-by: ddimatos * Changelog fragment Signed-off-by: ddimatos --------- Signed-off-by: ddimatos Conflicts: plugins/module_utils/job.py plugins/modules/zos_job_submit.py tests/functional/modules/test_zos_job_submit_func.py Changes to be 
committed: new file: changelogs/fragments/1246-bugfix-zos_job_submit-typrun.yml modified: plugins/module_utils/job.py modified: plugins/modules/zos_job_submit.py modified: tests/functional/modules/test_zos_job_submit_func.py * [v1.9.0] Document the collections SFTP requirement and file tagging. (#1296) * Fixes typo in property Signed-off-by: ddimatos * Initial commit for supporting typrun=scan Signed-off-by: ddimatos * Update jobs and zos_job_submit to better support jobs in the input queue Signed-off-by: ddimatos * Update zos_job_submit to remove other typrun scans from regex Signed-off-by: ddimatos * The ret_code msg field should have only had the status in it, not the RC Signed-off-by: ddimatos * Update msg_txt for jobs JCLHOlD, HOLD Signed-off-by: ddimatos * Update test cases with typrun Signed-off-by: ddimatos * Lint updates Signed-off-by: ddimatos * Updates to controll the messages to the ret_code property Signed-off-by: ddimatos * Update wait times as result of the timer fix forced tests to add more time Signed-off-by: ddimatos * Changelog fragment Signed-off-by: ddimatos * Changelog fragment Signed-off-by: ddimatos * Update doc for zos_archive to reference src over path Signed-off-by: ddimatos * Update docs to reference the SFTP requirement Signed-off-by: ddimatos * Update plugin doc Signed-off-by: ddimatos * Add changelog fragment Signed-off-by: ddimatos * Update zos_copy to explain that file tagging (chtag) is performed on updated USS files Signed-off-by: ddimatos * Corrected typo Signed-off-by: ddimatos * Corrected typo Signed-off-by: ddimatos * Corrected typo Signed-off-by: ddimatos --------- Signed-off-by: ddimatos Co-authored-by: Fernando Flores * update galaxy.yml Signed-off-by: ddimatos * Updte meta/runtime.yml with the version 2.15 Signed-off-by: ddimatos * Update meta collection with lastest versions Signed-off-by: ddimatos * Update README Signed-off-by: ddimatos * Update lint and galaxy to reflect 2.14 Signed-off-by: ddimatos * Add changelog 
summary Signed-off-by: ddimatos * update versions for zoau version checker Signed-off-by: ddimatos * Fix array syntax Signed-off-by: ddimatos * Documentation required for wtor filter Signed-off-by: ddimatos * Update changelog Signed-off-by: ddimatos * Update changelog meta Signed-off-by: ddimatos * update filters general doc Signed-off-by: ddimatos * Update submit modules doc Signed-off-by: ddimatos Conflicts: plugins/modules/zos_job_submit.py Changes to be committed: modified: plugins/modules/zos_job_submit.py * Update the rst for submit module Signed-off-by: ddimatos * Update release notes rst Signed-off-by: ddimatos * Correct lint warning Signed-off-by: ddimatos * Update zos_copy module doc Signed-off-by: ddimatos * Update RST for zos_copy Signed-off-by: ddimatos * Update copyright year Signed-off-by: ddimatos Conflicts: plugins/modules/zos_apf.py Changes to be committed: modified: docs/source/release_notes.rst modified: tests/functional/modules/test_zos_job_query_func.py * Delete changelog fragments after generating CHANGELOG Signed-off-by: ddimatos Conflicts: changelogs/fragments/1220-bugfix-zos_job_submit-default_value.yml changelogs/fragments/1261-job-submit-non-utf8-chars.yml changelogs/fragments/1292-doc-zos_tso_command-example.yml changelogs/fragments/1295-doc-zos_ping-scp.yml Changes to be committed: deleted: changelogs/fragments/1246-bugfix-zos_job_submit-typrun.yml deleted: changelogs/fragments/1296-doc-sftp-collection-requirements.yml deleted: changelogs/fragments/v1.9.0_summary.yml * Update source comment to align to code change Signed-off-by: ddimatos * Update source documentation after pull request review Signed-off-by: ddimatos Conflicts: plugins/modules/zos_job_submit.py Changes to be committed: modified: docs/source/release_notes.rst modified: plugins/filter/wtor.py modified: plugins/module_utils/job.py modified: plugins/modules/zos_copy.py modified: plugins/modules/zos_job_submit.py modified: plugins/modules/zos_ping.py modified: 
plugins/modules/zos_tso_command.py modified: tests/functional/modules/test_zos_job_query_func.py modified: tests/functional/modules/test_zos_job_submit_func.py * Typo correction Signed-off-by: ddimatos * Update Galaxy Signed-off-by: ddimatos * Update RST Signed-off-by: ddimatos * Changes to submit module after forward porting typrun support Signed-off-by: ddimatos * Lint corrections Signed-off-by: ddimatos * Update test cases to use upper case data set types due to choice requirments Signed-off-by: ddimatos * Updated test expected text Signed-off-by: ddimatos * Corrected typo and added test cleanup --------- Signed-off-by: ddimatos Co-authored-by: Fernando Flores --- .ansible-lint | 1 + CHANGELOG.rst | 22 +- README.md | 11 +- changelogs/.plugin-cache.yaml | 8 +- changelogs/changelog.yaml | 70 +++++ docs/source/filters.rst | 10 +- docs/source/modules/zos_archive.rst | 10 +- docs/source/modules/zos_backup_restore.rst | 9 + docs/source/modules/zos_copy.rst | 6 +- docs/source/modules/zos_data_set.rst | 28 +- docs/source/modules/zos_fetch.rst | 2 +- docs/source/modules/zos_job_submit.rst | 53 ++-- docs/source/modules/zos_script.rst | 4 +- docs/source/modules/zos_tso_command.rst | 2 +- docs/source/modules/zos_unarchive.rst | 18 +- docs/source/plugins.rst | 37 +-- docs/source/release_notes.rst | 93 +++++-- galaxy.yml | 3 +- meta/ibm_zos_core_meta.yml | 6 +- meta/runtime.yml | 2 +- plugins/filter/wtor.py | 55 ++++ plugins/module_utils/job.py | 137 ++++++---- plugins/modules/zos_copy.py | 23 +- plugins/modules/zos_fetch.py | 9 +- plugins/modules/zos_job_submit.py | 253 ++++++++++++------ plugins/modules/zos_ping.py | 2 +- plugins/modules/zos_ping.rexx | 2 +- plugins/modules/zos_script.py | 11 +- plugins/modules/zos_tso_command.py | 2 +- plugins/modules/zos_unarchive.py | 13 +- .../modules/test_zos_job_query_func.py | 6 +- .../modules/test_zos_job_submit_func.py | 232 +++++++++++++--- tests/unit/test_zoau_version_checker_unit.py | 16 +- 33 files changed, 842 insertions(+), 
314 deletions(-) diff --git a/.ansible-lint b/.ansible-lint index 821806e3a..9d40faf3b 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -35,6 +35,7 @@ exclude_paths: - tests/sanity/ignore-2.11.txt - tests/sanity/ignore-2.12.txt - tests/sanity/ignore-2.13.txt + - tests/sanity/ignore-2.14.txt - venv* parseable: true quiet: false diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 505a98474..d2f69d546 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,18 +5,23 @@ ibm.ibm_zos_core Release Notes .. contents:: Topics -v1.9.0-beta.1 -============= +v1.9.0 +====== Release Summary --------------- -Release Date: '2024-01-31' +Release Date: '2024-03-11' This changelog describes all changes made to the modules and plugins included in this collection. The release date is the date the changelog is created. For additional details such as required dependencies and availability review the collections `release notes `__ +Major Changes +------------- + +- zos_job_submit - when job statuses were read, were limited to AC (active), CC (completed normally), ABEND (ended abnormally) and ? (error unknown), SEC (security error), JCLERROR (job had a jcl error). Now the additional statuses are supported, CANCELLED (job was cancelled), CAB (converter abend), CNV (converter error), SYS (system failure) and FLU (job was flushed). (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + Minor Changes ------------- @@ -32,11 +37,22 @@ Minor Changes Bugfixes -------- +- module_utils/job.py - job output containing non-printable characters would crash modules. Fix now handles the error gracefully and returns a message to the user inside `content` of the `ddname` that failed. (https://github.com/ansible-collections/ibm_zos_core/pull/1288). +- zos_apf - When operation=list was selected and more than one data set entry was fetched, the module only returned one data set. Fix now returns the complete list. (https://github.com/ansible-collections/ibm_zos_core/pull/1236). 
- zos_copy - When copying an executable data set with aliases and destination did not exist, destination data set was created with wrong attributes. Fix now creates destination data set with the same attributes as the source. (https://github.com/ansible-collections/ibm_zos_core/pull/1066). - zos_copy - When performing a copy operation to an existing file, the copied file resulted in having corrupted contents. Fix now implements a workaround to not use the specific copy routine that corrupts the file contents. (https://github.com/ansible-collections/ibm_zos_core/pull/1064). +- zos_data_set - Fixes a small parsing bug in module_utils/data_set function which extracts volume serial(s) from a LISTCAT command output. Previously a leading '-' was left behind for volser strings under 6 chars. (https://github.com/ansible-collections/ibm_zos_core/pull/1247). - zos_job_output - When passing a job ID or name less than 8 characters long, the module sent the full stack trace as the module's message. Change now allows the use of a shorter job ID or name, as well as wildcards. (https://github.com/ansible-collections/ibm_zos_core/pull/1078). - zos_job_query - The module handling ZOAU import errors obscured the original traceback when an import error ocurred. Fix now passes correctly the context to the user. (https://github.com/ansible-collections/ibm_zos_core/pull/1042). - zos_job_query - When passing a job ID or name less than 8 characters long, the module sent the full stack trace as the module's message. Change now allows the use of a shorter job ID or name, as well as wildcards. (https://github.com/ansible-collections/ibm_zos_core/pull/1078). +- zos_job_submit - Was ignoring the default value for location=DATA_SET, now when location is not specified it will default to DATA_SET. (https://github.com/ansible-collections/ibm_zos_core/pull/1120). 
+- zos_job_submit - when a JCL error occurred, the ret_code[msg_code] contained JCLERROR followed by an integer where the integer appeared to be a reason code when actually it is a multi line marker used to coordinate errors spanning more than one line. Now when a JCLERROR occurs, only the JCLERROR is returned for property ret_code[msg_code]. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). +- zos_job_submit - when a response was returned, it contained an undocumented property; ret_code[msg_text]. Now when a response is returned, it correctly returns property ret_code[msg_txt]. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). +- zos_job_submit - when typrun=copy was used in JCL it would fail the module with an improper message and error condition. While this case continues to be considered a failure, the message has been corrected and it fails under the condition that not enough time has been added to the modules execution. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). +- zos_job_submit - when typrun=hold was used in JCL it would fail the module with an improper message and error condition. While this case continues to be considered a failure, the message has been corrected and it fails under the condition that not enough time has been added to the modules execution. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). +- zos_job_submit - when typrun=jchhold was used in JCL it would fail the module with an improper message and error condition. While this case continues to be considered a failure, the message has been corrected and it fails under the condition that not enough time has been added to the modules execution. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). +- zos_job_submit - when typrun=scan was used in JCL, it would fail the module. Now typrun=scan no longer fails the module and an appropriate message is returned with appropriate return code values. 
(https://github.com/ansible-collections/ibm_zos_core/pull/1283). +- zos_job_submit - when wait_time_s was used, the duration would run approximately 5 second longer than reported in the duration. Now the when duration is returned, it is the actual accounting from when the job is submitted to when the module reads the job output. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). - zos_operator - The module handling ZOAU import errors obscured the original traceback when an import error ocurred. Fix now passes correctly the context to the user. (https://github.com/ansible-collections/ibm_zos_core/pull/1042). - zos_unarchive - Using a local file with a USS format option failed when sending to remote because dest_data_set option had an empty dictionary. Fix now leaves dest_data_set as None when using a USS format option. (https://github.com/ansible-collections/ibm_zos_core/pull/1045). - zos_unarchive - When unarchiving USS files, the module left temporary files on the remote. Change now removes temporary files. (https://github.com/ansible-collections/ibm_zos_core/pull/1073). diff --git a/README.md b/README.md index da3b114d4..b2345c118 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ and ansible-doc to automate tasks on z/OS. Ansible version compatibility ============================= -This collection has been tested against **Ansible Core** versions >=2.14. +This collection has been tested against **Ansible Core** versions >=2.15. The Ansible Core versions supported for this collection align to the [ansible-core support matrix](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-core-support-matrix). Review the [Ansible community changelogs](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-community-changelogs) for corresponding **Ansible community packages** @@ -64,11 +64,12 @@ for more more information on supported versions of Ansible. 
Other Dependencies ================== -This release of the **IBM z/OS core collection** requires the z/OS managed node have: -- [z/OS](https://www.ibm.com/docs/en/zos) V2R4 or later. +This release of the **IBM z/OS core collection** requires the z/OS managed node have the following: +- [z/OS](https://www.ibm.com/docs/en/zos) - [z/OS shell](https://www.ibm.com/support/knowledgecenter/en/SSLTBW_2.4.0/com.ibm.zos.v2r4.bpxa400/part1.htm). -- [IBM Open Enterprise SDK for Python](https://www.ibm.com/products/open-enterprise-python-zos) 3.9 - 3.11. -- [IBM Z Open Automation Utilities](https://www.ibm.com/docs/en/zoau/1.2.x) 1.2.5 (or later) but prior to version 1.3. +- [IBM Open Enterprise SDK for Python](https://www.ibm.com/products/open-enterprise-python-zos) +- [IBM Z Open Automation Utilities](https://www.ibm.com/docs/en/zoau/1.2.x) +For specific dependency versions, please review the [release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html) for the version of the IBM Ansible z/OS core installed. Copyright ========= diff --git a/changelogs/.plugin-cache.yaml b/changelogs/.plugin-cache.yaml index 899014cd9..4e2979ebb 100644 --- a/changelogs/.plugin-cache.yaml +++ b/changelogs/.plugin-cache.yaml @@ -6,7 +6,11 @@ plugins: callback: {} cliconf: {} connection: {} - filter: {} + filter: + filter_wtor_messages: + description: Filter a list of WTOR messages + name: filter_wtor_messages + version_added: 1.2.0 httpapi: {} inventory: {} lookup: {} @@ -131,4 +135,4 @@ plugins: strategy: {} test: {} vars: {} -version: 1.9.0-beta.1 +version: 1.9.0 diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index c05af6436..a8404bf84 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1176,6 +1176,76 @@ releases: name: zos_script namespace: '' release_date: '2023-10-24' + 1.9.0: + changes: + bugfixes: + - module_utils/job.py - job output containing non-printable characters would + crash modules. 
Fix now handles the error gracefully and returns a message + to the user inside `content` of the `ddname` that failed. (https://github.com/ansible-collections/ibm_zos_core/pull/1288). + - zos_apf - When operation=list was selected and more than one data set entry + was fetched, the module only returned one data set. Fix now returns the complete + list. (https://github.com/ansible-collections/ibm_zos_core/pull/1236). + - zos_data_set - Fixes a small parsing bug in module_utils/data_set function + which extracts volume serial(s) from a LISTCAT command output. Previously + a leading '-' was left behind for volser strings under 6 chars. (https://github.com/ansible-collections/ibm_zos_core/pull/1247). + - zos_job_submit - Was ignoring the default value for location=DATA_SET, now + when location is not specified it will default to DATA_SET. (https://github.com/ansible-collections/ibm_zos_core/pull/1120). + - zos_job_submit - when a JCL error occurred, the ret_code[msg_code] contained + JCLERROR followed by an integer where the integer appeared to be a reason + code when actually it is a multi line marker used to coordinate errors spanning + more than one line. Now when a JCLERROR occurs, only the JCLERROR is returned + for property ret_code[msg_code]. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + - zos_job_submit - when a response was returned, it contained an undocumented + property; ret_code[msg_text]. Now when a response is returned, it correctly + returns property ret_code[msg_txt]. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + - zos_job_submit - when typrun=copy was used in JCL it would fail the module + with an improper message and error condition. While this case continues to + be considered a failure, the message has been corrected and it fails under + the condition that not enough time has been added to the modules execution. + (https://github.com/ansible-collections/ibm_zos_core/pull/1283). 
+ - zos_job_submit - when typrun=hold was used in JCL it would fail the module + with an improper message and error condition. While this case continues to + be considered a failure, the message has been corrected and it fails under + the condition that not enough time has been added to the modules execution. + (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + - zos_job_submit - when typrun=jchhold was used in JCL it would fail the module + with an improper message and error condition. While this case continues to + be considered a failure, the message has been corrected and it fails under + the condition that not enough time has been added to the modules execution. + (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + - zos_job_submit - when typrun=scan was used in JCL, it would fail the module. + Now typrun=scan no longer fails the module and an appropriate message is returned + with appropriate return code values. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + - zos_job_submit - when wait_time_s was used, the duration would run approximately + 5 second longer than reported in the duration. Now the when duration is returned, + it is the actual accounting from when the job is submitted to when the module + reads the job output. (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + major_changes: + - zos_job_submit - when job statuses were read, were limited to AC (active), + CC (completed normally), ABEND (ended abnormally) and ? (error unknown), SEC + (security error), JCLERROR (job had a jcl error). Now the additional statuses + are supported, CANCELLED (job was cancelled), CAB (converter abend), CNV (converter + error), SYS (system failure) and FLU (job was flushed). (https://github.com/ansible-collections/ibm_zos_core/pull/1283). + release_summary: 'Release Date: ''2024-03-11'' + + This changelog describes all changes made to the modules and plugins included + + in this collection. 
The release date is the date the changelog is created. + + For additional details such as required dependencies and availability review + + the collections `release notes `__' + fragments: + - 1120-bugfix-zos_job_submit-default_value.yml + - 1236-bugfix-zos_apf-return-list.yml + - 1246-bugfix-zos_job_submit-typrun.yml + - 1247-volser-parsing-leading-dash-bugfix.yml + - 1288-job-submit-non-utf8-chars.yml + - 1292-doc-zos_tso_command-example.yml + - 1294-doc-zos_ping-scp.yml + - 1296-doc-sftp-collection-requirements.yml + - v1.9.0_summary.yml + release_date: '2024-03-16' 1.9.0-beta.1: changes: bugfixes: diff --git a/docs/source/filters.rst b/docs/source/filters.rst index 51e3a034f..bbf24c6d4 100644 --- a/docs/source/filters.rst +++ b/docs/source/filters.rst @@ -5,13 +5,9 @@ Filters ======= -Filters in Ansible are from Jinja2, and are used to transform data inside -a template expression. The templates operate on the Ansible controller, and not -on the target host. Therefore, filters execute on the controller as they augment -the data locally. - -Jinja2 ships with many filters as does Ansible, and also allows users to add -their own custom filters. +Filters are used to transform data inside a template expression. The templates +operate on the Ansible controller, not on the managed node. Therefore, +filters execute on the controller as they augment the data locally. The **IBM z/OS core collection** includes filters and their usage in sample playbooks. 
Unlike collections that can be identified at the top level using the diff --git a/docs/source/modules/zos_archive.rst b/docs/source/modules/zos_archive.rst index 525c7c0be..fe93474f0 100644 --- a/docs/source/modules/zos_archive.rst +++ b/docs/source/modules/zos_archive.rst @@ -342,7 +342,7 @@ Examples # Simple archive - name: Archive file into a tar zos_archive: - path: /tmp/archive/foo.txt + src: /tmp/archive/foo.txt dest: /tmp/archive/foo_archive_test.tar format: name: tar @@ -350,7 +350,7 @@ Examples # Archive multiple files - name: Compress list of files into a zip zos_archive: - path: + src: - /tmp/archive/foo.txt - /tmp/archive/bar.txt dest: /tmp/archive/foo_bar_archive_test.zip @@ -360,7 +360,7 @@ Examples # Archive one data set into terse - name: Compress data set into a terse zos_archive: - path: "USER.ARCHIVE.TEST" + src: "USER.ARCHIVE.TEST" dest: "USER.ARCHIVE.RESULT.TRS" format: name: terse @@ -368,7 +368,7 @@ Examples # Use terse with different options - name: Compress data set into a terse, specify pack algorithm and use adrdssu zos_archive: - path: "USER.ARCHIVE.TEST" + src: "USER.ARCHIVE.TEST" dest: "USER.ARCHIVE.RESULT.TRS" format: name: terse @@ -379,7 +379,7 @@ Examples # Use a pattern to store - name: Compress data set pattern using xmit zos_archive: - path: "USER.ARCHIVE.*" + src: "USER.ARCHIVE.*" exclude_sources: "USER.ARCHIVE.EXCLUDE.*" dest: "USER.ARCHIVE.RESULT.XMIT" format: diff --git a/docs/source/modules/zos_backup_restore.rst b/docs/source/modules/zos_backup_restore.rst index cc6c60d66..d70efc7a1 100644 --- a/docs/source/modules/zos_backup_restore.rst +++ b/docs/source/modules/zos_backup_restore.rst @@ -200,6 +200,15 @@ hlq | **type**: str +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup data sets. + + The default HLQ is the Ansible user that executes the module and if that is not available, then the value of ``TMPHLQ`` is used. 
+ + | **required**: False + | **type**: str + + Examples diff --git a/docs/source/modules/zos_copy.rst b/docs/source/modules/zos_copy.rst index 86a3a9463..00e274b00 100644 --- a/docs/source/modules/zos_copy.rst +++ b/docs/source/modules/zos_copy.rst @@ -91,6 +91,8 @@ dest If ``dest`` is a nonexistent USS file, it will be created. + If ``dest`` is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the copy will fail. + If ``dest`` is a nonexistent data set, it will be created following the process outlined here and in the ``volume`` option. If ``dest`` is a nonexistent data set, the attributes assigned will depend on the type of ``src``. If ``src`` is a USS file, ``dest`` will have a Fixed Block (FB) record format and the remaining attributes will be computed. If *is_binary=true*, ``dest`` will have a Fixed Block (FB) record format with a record length of 80, block size of 32760, and the remaining attributes will be computed. If *executable=true*,``dest`` will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed. @@ -787,9 +789,9 @@ Notes For supported character sets used to encode data, refer to the `documentation `_. - `zos_copy <./zos_copy.html>`_ uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; Co:Z SFTP is not supported. In the case of Co:z SFTP, you can exempt the Ansible userid on z/OS from using Co:Z thus falling back to using standard SFTP. + This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. 
If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - Beginning in version 1.8.x, zos_copy will no longer attempt to autocorrect a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option executable that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will be responded with a (FSUM8976,./zos_copy.html) error. + Beginning in version 1.8.x, zos_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option ``executable`` that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos_copy.html) error. diff --git a/docs/source/modules/zos_data_set.rst b/docs/source/modules/zos_data_set.rst index 70e798a08..0ea34875f 100644 --- a/docs/source/modules/zos_data_set.rst +++ b/docs/source/modules/zos_data_set.rst @@ -97,7 +97,7 @@ type ``MEMBER`` expects to be used with an existing partitioned data set. - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str @@ -139,7 +139,7 @@ space_type record_format The format of the data set. (e.g ``FB``) - Choices are case-insensitive. + Choices are case-sensitive. When *type=KSDS*, *type=ESDS*, *type=RRDS*, *type=LDS* or *type=ZFS* then *record_format=None*, these types do not have a default *record_format*. @@ -370,7 +370,7 @@ batch ``MEMBER`` expects to be used with an existing partitioned data set. - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str @@ -412,7 +412,7 @@ batch record_format The format of the data set. (e.g ``FB``) - Choices are case-insensitive. + Choices are case-sensitive. 
When *type=KSDS*, *type=ESDS*, *type=RRDS*, *type=LDS* or *type=ZFS* then *record_format=None*, these types do not have a default *record_format*. @@ -568,7 +568,7 @@ Examples - name: Create a sequential data set if it does not exist zos_data_set: name: someds.name.here - type: seq + type: SEQ state: present - name: Create a PDS data set if it does not exist @@ -577,26 +577,26 @@ Examples type: pds space_primary: 5 space_type: M - record_format: fba + record_format: FBA record_length: 25 - name: Attempt to replace a data set if it exists zos_data_set: name: someds.name.here - type: pds + type: PDS space_primary: 5 space_type: M - record_format: u + record_format: U record_length: 25 replace: yes - name: Attempt to replace a data set if it exists. If not found in the catalog, check if it is available on volume 222222, and catalog if found. zos_data_set: name: someds.name.here - type: pds + type: PDS space_primary: 5 space_type: M - record_format: u + record_format: U record_length: 25 volumes: "222222" replace: yes @@ -604,19 +604,19 @@ Examples - name: Create an ESDS data set if it does not exist zos_data_set: name: someds.name.here - type: esds + type: ESDS - name: Create a KSDS data set if it does not exist zos_data_set: name: someds.name.here - type: ksds + type: KSDS key_length: 8 key_offset: 0 - name: Create an RRDS data set with storage class MYDATA if it does not exist zos_data_set: name: someds.name.here - type: rrds + type: RRDS sms_storage_class: mydata - name: Delete a data set if it exists @@ -661,7 +661,7 @@ Examples type: PDS space_primary: 5 space_type: M - record_format: fb + record_format: FB replace: yes - name: someds.name.here1(member1) type: MEMBER diff --git a/docs/source/modules/zos_fetch.rst b/docs/source/modules/zos_fetch.rst index 21b573a2a..87a50a65a 100644 --- a/docs/source/modules/zos_fetch.rst +++ b/docs/source/modules/zos_fetch.rst @@ -204,7 +204,7 @@ Notes For supported character sets used to encode data, refer to the `documentation 
`_. - `zos_fetch <./zos_fetch.html>`_ uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; Co:Z SFTP is not supported. In the case of Co:z SFTP, you can exempt the Ansible userid on z/OS from using Co:Z thus falling back to using standard SFTP. + This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. diff --git a/docs/source/modules/zos_job_submit.rst b/docs/source/modules/zos_job_submit.rst index 6cff37a6a..8f4dda61b 100644 --- a/docs/source/modules/zos_job_submit.rst +++ b/docs/source/modules/zos_job_submit.rst @@ -16,9 +16,8 @@ zos_job_submit -- Submit JCL Synopsis -------- -- Submit JCL from a data set, USS, or from the controller. -- Submit a job and optionally monitor for completion. -- Optionally, wait a designated time until the job finishes. +- Submit JCL in a data set, USS file, or file on the controller. +- Submit a job and monitor for completion. - For an uncataloged dataset, specify the volume serial number. @@ -57,18 +56,6 @@ location | **choices**: DATA_SET, USS, LOCAL -wait - Setting this option will yield no change, it is deprecated. There is no no need to set *wait*; setting *wait_times_s* is the correct way to configure the amount of tme to wait for a job to execute. - - Configuring wait used by the `zos_job_submit <./zos_job_submit.html>`_ module has been deprecated and will be removed in ibm.ibm_zos_core collection. - - See option *wait_time_s*. - - | **required**: False - | **type**: bool - | **default**: False - - wait_time_s Option *wait_time_s* is the total time that module `zos_job_submit <./zos_job_submit.html>`_ will wait for a submitted job to complete. 
The time begins when the module is executed on the managed node. @@ -333,6 +320,8 @@ Notes .. note:: For supported character sets used to encode data, refer to the `documentation `_. + This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + @@ -689,24 +678,46 @@ jobs } msg - Return code resulting from the job submission. Jobs that take longer to assign a value can have a value of '?'. + Job status resulting from the job submission. + + Job status `ABEND` indicates the job ended abnormally. + + Job status `AC` indicates the job is active, often a started task or job taking long. + + Job status `CAB` indicates a converter abend. + + Job status `CANCELED` indicates the job was canceled. + + Job status `CNV` indicates a converter error. + + Job status `FLU` indicates the job was flushed. + + Job status `JCLERR` or `JCL ERROR` indicates the JCL has an error. + + Job status `SEC` or `SEC ERROR` indicates the job has encountered a security error. + + Job status `SYS` indicates a system failure. + + Job status `?` indicates status cannot be determined. | **type**: str - | **sample**: CC 0000 + | **sample**: AC msg_code - Return code extracted from the `msg` so that it can be evaluated as a string. Jobs that take longer to assign a value can have a value of '?'. + The return code from the submitted job as a string. | **type**: str msg_txt - Returns additional information related to the job. Jobs that take longer to assign a value can have a value of '?'. + Returns additional information related to the submitted job.
| **type**: str - | **sample**: The job completion code (CC) was not available in the job output, please review the job log." + | **sample**: The job JOB00551 was run with special job processing TYPRUN=SCAN. This will result in no completion, return code or job steps and changed will be false. code - Return code converted to an integer value (when possible). For JCL ERRORs, this will be None. + The return code converted to an integer value when available. + + Jobs which have no return code will return NULL, such is the case of a job that errors or is active. | **type**: int diff --git a/docs/source/modules/zos_script.rst b/docs/source/modules/zos_script.rst index f51096361..31b237588 100644 --- a/docs/source/modules/zos_script.rst +++ b/docs/source/modules/zos_script.rst @@ -296,9 +296,7 @@ Notes For supported character sets used to encode data, refer to the `documentation `_. - This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine. - - `zos_copy <./zos_copy.html>`_ uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; Co:Z SFTP is not supported. In the case of Co:z SFTP, you can exempt the Ansible userid on z/OS from using Co:Z thus falling back to using standard SFTP. + This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with `zos_tso_command <./zos_tso_command.html>`_. 
diff --git a/docs/source/modules/zos_tso_command.rst b/docs/source/modules/zos_tso_command.rst index f3cdb0254..4af6b1b52 100644 --- a/docs/source/modules/zos_tso_command.rst +++ b/docs/source/modules/zos_tso_command.rst @@ -72,7 +72,7 @@ Examples - LISTDSD DATASET('HLQ.DATA.SET') ALL GENERIC max_rc: 4 - - name: Execute TSO command to run explicitly a REXX script from a data set. + - name: Execute TSO command to run a REXX script explicitly from a data set. zos_tso_command: commands: - EXEC HLQ.DATASET.REXX exec diff --git a/docs/source/modules/zos_unarchive.rst b/docs/source/modules/zos_unarchive.rst index da80bd31a..91fa597ee 100644 --- a/docs/source/modules/zos_unarchive.rst +++ b/docs/source/modules/zos_unarchive.rst @@ -362,14 +362,14 @@ Examples # Simple extract - name: Copy local tar file and unpack it on the managed z/OS node. zos_unarchive: - path: "./files/archive_folder_test.tar" + src: "./files/archive_folder_test.tar" format: name: tar # use include - name: Unarchive a bzip file selecting only a file to unpack. zos_unarchive: - path: "/tmp/test.bz2" + src: "/tmp/test.bz2" format: name: bz2 include: @@ -378,7 +378,7 @@ Examples # Use exclude - name: Unarchive a terse data set and excluding data sets from unpacking. zos_unarchive: - path: "USER.ARCHIVE.RESULT.TRS" + src: "USER.ARCHIVE.RESULT.TRS" format: name: terse exclude: @@ -388,7 +388,7 @@ Examples # List option - name: List content from XMIT zos_unarchive: - path: "USER.ARCHIVE.RESULT.XMIT" + src: "USER.ARCHIVE.RESULT.XMIT" format: name: xmit format_options: @@ -404,6 +404,8 @@ Notes .. note:: VSAMs are not supported. + This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. 
If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + + See Also @@ -411,7 +413,7 @@ See Also .. seealso:: - - :ref:`zos_unarchive_module` + - :ref:`zos_archive_module` @@ -420,14 +422,14 @@ Return Values ------------- -path - File path or data set name unarchived. +src + File path or data set name unpacked. | **returned**: always | **type**: str dest_path - Destination path where archive was extracted. + Destination path where archive was unpacked. | **returned**: always | **type**: str diff --git a/docs/source/plugins.rst b/docs/source/plugins.rst index 5c8605ad3..ef0f6c183 100644 --- a/docs/source/plugins.rst +++ b/docs/source/plugins.rst @@ -5,30 +5,33 @@ Plugins ======= -Plugins that come with the **IBM z/OS core collection** augment Ansible's core +Plugins that come with the **IBM z/OS core collection** complement Ansible's core functionality. Ansible uses a plugin architecture to enable a rich, flexible and expandable feature set. Action ------ -* ``zos_ping``: Manages the REXX source transferred to the z/OS managed node for - `zos_ping`_. -* ``zos_copy``: Used to `copy data`_ from the controller to the z/OS managed - node. -* ``zos_fetch``: Used to `fetch data`_ from the z/OS managed node to the - controller. -* ``zos_job_submit``: Used to `submit a job`_ from the controller and optionally - monitor the job completion. +Action plugins integrate local processing and local data with module functionality. +Action plugins are executed by default when an associated module is used; no additional +user action is required, this documentation is reference only. -.. _normal: - https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/action/normal.py -.. _zos_ping: - modules/zos_ping.html -.. _copy data: +* `zos_copy`_: Used to copy data from the controller to the z/OS managed node. +* `zos_fetch`_: Used to fetch data from the z/OS managed node to the controller.
+* `zos_job_submit`_: Used to submit a job from the controller to the z/OS managed node. +* `zos_ping`_: Used to transfer the module's REXX source to the z/OS managed node. +* `zos_script`_: Used to transfer scripts from the controller to the z/OS managed node. +* `zos_unarchive`_: Used to transfer archives from the controller to the z/OS managed node. + +.. _zos_copy: modules/zos_copy.html -.. _fetch data: +.. _zos_fetch: modules/zos_fetch.html -.. _submit a job: +.. _zos_job_submit: modules/zos_job_submit.html - +.. _zos_ping: + modules/zos_ping.html +.. _zos_script: + modules/zos_script.html +.. _zos_unarchive: + modules/zos_unarchive.html diff --git a/docs/source/release_notes.rst b/docs/source/release_notes.rst index 726c1b64c..7c2c3a929 100644 --- a/docs/source/release_notes.rst +++ b/docs/source/release_notes.rst @@ -1,13 +1,22 @@ .. ........................................................................... -.. © Copyright IBM Corporation 2020, 2021, 2023 . +.. © Copyright IBM Corporation 2020, 2024 . .. ........................................................................... ======== Releases ======== -Version 1.9.0-beta.1 -==================== +Version 1.9.0 +============= + +Major Changes +------------- + - IBM Ansible z/OS core collection (**ibm_zos_core**) version 1.9.0 will be the last release to support ZOAU 1.2.x. + + - IBM Ansible z/OS core version 1.9.0 will continue to receive security updates and bug fixes. + + - Starting with IBM Ansible z/OS core version 1.10.0, ZOAU version 1.3.0 will be required. + - IBM Open Enterprise SDK for Python version 3.9.x is no longer supported. Minor Changes ------------- @@ -21,7 +30,24 @@ Minor Changes - Improved messages in the action plugin. - Improved the action plugin performance, flow and use of undocumented variables. - Improved the modules handling of ZOAU import errors allowing for the traceback to flow back to the source.
-- ``zos_tso_command`` - Has been updated with a new example demonstrating how to explicitly execute a REXX script in a data set. + - Improved job status support, now the supported statuses for property **ret_code[msg]** are: + + - Job status **ABEND** indicates the job ended abnormally. + - Job status **AC** indicates the job is active, often a started task or job taking long. + - Job status **CAB** indicates a converter abend. + - Job status **CANCELED** indicates the job was canceled. + - Job status **CNV** indicates a converter error. + - Job status **FLU** indicates the job was flushed. + - Job status **JCLERR** or **JCL ERROR** indicates the JCL has an error. + - Job status **SEC** or **SEC ERROR** indicates the job has encountered a security error. + - Job status **SYS** indicates a system failure. + - Job status **?** indicates status cannot be determined. + +- ``zos_tso_command`` + + - Has been updated with a new example demonstrating how to explicitly execute a REXX script in a data set. + - Has been updated with a new example demonstrating how to chain multiple TSO commands into one invocation using semicolons. + - ``zos_mvs_raw`` - Has been enhanced to ensure that **instream-data** for option **dd_input** contain blanks in columns 1 and 2 while retaining a maximum length @@ -33,40 +59,69 @@ Minor Changes Bugfixes -------- +- ``zos_apf`` - Fixed an issue that when **operation=list** was selected and more than one data set entry was fetched, only one + data set was returned, now the complete list is returned. + - ``zos_copy`` - - Fixed an issue when copying an aliased executable from a data set to a non-existent data set, the destination data sets primary - and secondary extents would not match the source data set extent sizes. + - Fixed an issue that when copying an aliased executable from a data set to a non-existent data set, the destination + data set's primary and secondary extents would not match the source data set extent sizes.
- Fixed an issue when performing a copy operation to an existing file, the copied file resulted in having corrupted contents. -- ``zos_job_output`` - Fixed an issue that when using a job ID with less than 8 characters would result in a traceback. The fix +- ``zos_job_submit`` + + - Fixed an issue that when no **location** is set, the default is not correctly configured to **location=DATA_SET**. + - Fixed an issue that when a JCL error is encountered, the **ret_code[msg_code]** no longer will contain the multi line marker used to coordinate errors. + - Fixed an issue that when a response was returned, the property **ret_code[msg_text]** was incorrectly returned over **ret_code[msg_txt]**. + - Fixed an issue that when JCL contained **TYPRUN=SCAN**, the module would fail. The module no longer fails and an appropriate message and response is returned. + - Fixed an issue that when JCL contained either **TYPRUN=COPY**, **TYPRUN=HOLD**, or **TYPRUN=JCLHOLD** an improper message was returned and the job submission failed. + Now the job will fail under the condition that the module has exceeded its wait time and return a proper message. + - Fixed an issue where when option **wait_time_s** was used, the duration would be approximately 5 seconds longer than what was reported in the duration. + Now the duration is from when the job is submitted to when the module reads the job output. + +- ``zos_job_output`` - Fixed an issue that when using a job ID with less than 8 characters, would result in a traceback. The fix supports shorter job IDs as well as the use of wildcards. -- ``zos_job_query`` - Fixed an issue that when using a job ID with less than 8 characters would result in a traceback. The fix +- ``zos_job_query`` - Fixed an issue that when using a job ID with less than 8 characters, would result in a traceback. The fix supports shorter job IDs as well as the use of wildcards. 
- ``zos_unarchive`` - - Fixed an issue when using a local file with the USS format option that would fail sending it to the managed node. - - Fixed an issue that occurred when unarchiving USS files that would leave temporary files behind on the managed node. + - Fixed an issue that when using a local file with the USS format option, the module would fail to send the archive to the managed node. + - Fixed an issue that occurred when unarchiving USS files, the module would leave temporary files behind on the managed node. + +- ``module_utils`` + + - ``job.py`` - Improved exception handling and added a message inside the **content** of the **ddname** when a non-printable + character (character that cannot be converted to UTF-8) is encountered. + - ``data_set.py`` - Fixed an issue that when a volser name less than 6 characters was encountered, the volser name was padded with hyphens to have length 6. + Known Issues ------------ Several modules have reported UTF-8 decoding errors when interacting with results that contain non-printable UTF-8 characters in the response. -This occurs when a module receives content that does not correspond to a UTF-8 value. These include modules ``zos_job_submit``, ``zos_job_output``, -``zos_operator_action_query``` but are not limited to this list. This will be addressed in **ibm_zos_core** version 1.10.0-beta.1. Each case is -unique, some options to work around the error are below. +- This occurs when a module receives content that does not correspond to a UTF-8 value. These include modules ``zos_job_submit``, ``zos_job_output``, + ``zos_operator_action_query`` but are not limited to this list. This has been addressed in this release and corrected with **ZOAU version 1.2.5.6**. +- If the appropriate level of ZOAU can not be installed, some options are to: -- Specify that the ASA assembler option be enabled to instruct the assembler to use ANSI control characters instead of machine code control characters.
-- Add **ignore_errors:true** to the playbook task so the task error will not fail the playbook. -- If the error is resulting from a batch job, add **ignore_errors:true** to the task and capture the output into a variable and extract the job ID with - a regular expression and then use ``zos_job_output`` to display the DD without the non-printable character such as the DD **JESMSGLG**. + - Specify that the ASA assembler option be enabled to instruct the assembler to use ANSI control characters instead of machine code control characters. + - Ignore module errors by using **ignore_errors:true** for a specific playbook task. + - If the error is resulting from a batch job, add **ignore_errors:true** to the task and capture the output into a registered variable to extract the + job ID with a regular expression. Then use ``zos_job_output`` to display the DD without the non-printable character such as the DD **JESMSGLG**. + - If the error is the result of a batch job, set option **return_output** to false so that no DDs are read which could contain the non-printable UTF-8 characters. + +An undocumented option **size** was defined in module **zos_data_set**, this has been removed to satisfy collection certification, use the intended +and documented **space_primary** option. + +In the past, choices could be defined in either lower or upper case. Now, only the case that is identified in the docs can be set, +this is so that the collection can continue to maintain certified status. Availability ------------ +* `Automation Hub`_ * `Galaxy`_ * `GitHub`_ @@ -75,7 +130,7 @@ Reference * Supported by `z/OS®`_ V2R4 (or later) but prior to version V3R1 * Supported by the `z/OS® shell`_ -* Supported by `IBM Open Enterprise SDK for Python`_ `3.9`_ - `3.11`_ +* Supported by `IBM Open Enterprise SDK for Python`_ `3.10`_ - `3.12`_ * Supported by IBM `Z Open Automation Utilities 1.2.5`_ (or later) but prior to version 1.3. 
Version 1.8.0 @@ -978,6 +1033,8 @@ Known issues https://www.ibm.com/docs/en/python-zos/3.10 .. _3.11: https://www.ibm.com/docs/en/python-zos/3.11 +.. _3.12: + https://www.ibm.com/docs/en/python-zos/3.12 .. _Z Open Automation Utilities 1.1.0: https://www.ibm.com/docs/en/zoau/1.1.x .. _Z Open Automation Utilities 1.1.1: diff --git a/galaxy.yml b/galaxy.yml index 93af5d038..c408424aa 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -6,7 +6,7 @@ namespace: ibm name: ibm_zos_core # The collection version -version: 1.9.0-beta.1 +version: 1.10.0-beta.1 # Collection README file readme: README.md @@ -96,4 +96,5 @@ build_ignore: - tests/sanity/ignore-2.11.txt - tests/sanity/ignore-2.12.txt - tests/sanity/ignore-2.13.txt + - tests/sanity/ignore-2.14.txt - venv* diff --git a/meta/ibm_zos_core_meta.yml b/meta/ibm_zos_core_meta.yml index abab47f9c..7e24bc280 100644 --- a/meta/ibm_zos_core_meta.yml +++ b/meta/ibm_zos_core_meta.yml @@ -1,10 +1,10 @@ name: ibm_zos_core -version: "1.9.0-beta.1" +version: "1.10.0-beta.1" managed_requirements: - name: "IBM Open Enterprise SDK for Python" - version: ">=3.9" + version: ">=3.10" - name: "Z Open Automation Utilities" version: - - "1.2.5" + - "1.3.0" diff --git a/meta/runtime.yml b/meta/runtime.yml index be99ccf4b..898ad8ff5 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,2 +1,2 @@ --- -requires_ansible: '>=2.14.0' +requires_ansible: '>=2.15.0' diff --git a/plugins/filter/wtor.py b/plugins/filter/wtor.py index 28e908376..17b530218 100644 --- a/plugins/filter/wtor.py +++ b/plugins/filter/wtor.py @@ -12,6 +12,61 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type + +DOCUMENTATION = r""" +name: filter_wtor_messages +author: Demetrios Dimatos (@ddimatos) +version_added: "1.2.0" +short_description: Filter a list of WTOR messages +description: + - Filter a list of WTOR (write to operator with reply) messages found by + module zos_operator_action_query. + - Filter using a string or regular expression. 
+options: + wtor_response: + description: + - A list containing response property `message_text`, provided the + module zos_operator_action_query. + - The list can be the outstanding messages found in the modules + response under the `actions` property or the entire module + response. + type: list + required: true + text: + description: + - String of text to match or a regular expression to use as filter criteria. + type: str + required: true + ingore_case: + description: + - Should the filter enable case sensitivity when performing a match. + type: bool + required: false + default: false +""" + +EXAMPLES = r""" +- name: Filter actionable messages that match 'IEE094D SPECIFY OPERAND' and if so, set is_specify_operand = true. + set_fact: + is_specify_operand: "{{ result | ibm.ibm_zos_core.filter_wtor_messages('IEE094D SPECIFY OPERAND') }}" + when: result is defined and not result.failed + +- name: Evaluate if there are any existing dump messages matching 'IEE094D SPECIFY OPERAND' + assert: + that: + - is_specify_operand is defined + - bool_zos_operator_action_continue + success_msg: "Found 'IEE094D SPECIFY OPERAND' message." + fail_msg: "Did not find 'IEE094D SPECIFY OPERAND' message." +""" + +RETURN = r""" + _value: + description: A list containing dictionaries matching the WTOR. + type: list + elements: dict +""" + import re diff --git a/plugins/module_utils/job.py b/plugins/module_utils/job.py index 1f49a2b26..25483b45d 100644 --- a/plugins/module_utils/job.py +++ b/plugins/module_utils/job.py @@ -1,4 +1,4 @@ -# Copyright (c) IBM Corporation 2019 - 2024 +# Copyright (c) IBM Corporation 2019, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -21,7 +21,7 @@ # Only importing this module so we can catch a JSONDecodeError that sometimes happens # when a job's output has non-printable chars that conflict with JSON's control # chars. 
-from json import decoder +from json import JSONDecodeError from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.better_arg_parser import ( BetterArgParser, ) @@ -29,6 +29,12 @@ ZOAUImportError ) +try: + from zoautil_py import exceptions +except ImportError: + exceptions = ZOAUImportError(traceback.format_exc()) + + try: # For files that import individual functions from a ZOAU module, # we'll replace the imports to instead get the module. @@ -40,6 +46,18 @@ except Exception: jobs = ZOAUImportError(traceback.format_exc()) +JOB_ERROR_STATUSES = frozenset(["ABEND", # ZOAU job ended abnormally + "SEC ERROR", # Security error (legacy Ansible code) + "SEC", # ZOAU security error + "JCL ERROR", # Job had a JCL error (legacy Ansible code) + "JCLERR", # ZOAU job had a JCL error + "CANCELED", # ZOAU job was cancelled + "CAB", # ZOAU converter abend + "CNV", # ZOAU converter error + "SYS", # ZOAU system failure + "FLU" # ZOAU job was flushed + ]) + def job_output(job_id=None, owner=None, job_name=None, dd_name=None, dd_scan=True, duration=0, timeout=0, start_time=timer()): """Get the output from a z/OS job based on various search criteria. @@ -89,11 +107,6 @@ def job_output(job_id=None, owner=None, job_name=None, dd_name=None, dd_scan=Tru start_time=start_time ) - # while ((job_detail is None or len(job_detail) == 0) and duration <= timeout): - # current_time = timer() - # duration = round(current_time - start_time) - # sleep(1) - if len(job_detail) == 0: # some systems have issues with "*" while some require it to see results job_id = "" if job_id == "*" else job_id @@ -238,17 +251,9 @@ def _get_job_status(job_id="*", owner="*", job_name="*", dd_name=None, dd_scan=T # Preserve the original job_id for the failure path job_id_temp = job_id - # jls output: owner=job[0], name=job[1], id=job[2], status=job[3], rc=job[4] - # e.g.: OMVSADM HELLO JOB00126 JCLERR ? 
- # jobs.listing(job_id, owner) in 1.2.0 has owner param, 1.1 does not - # jls output has expanded in zoau 1.2.3 and later: jls -l -v shows headers - # jobclass=job[5] serviceclass=job[6] priority=job[7] asid=job[8] - # creationdatetime=job[9] queueposition=job[10] - # starting in zoau 1.2.4, program_name[11] was added. In 1.3.0, include_extended - # has to be set to true so we get the program name for a job. - # Testing has shown that the program_name impact is minor, so we're removing that option - final_entries = [] + + # In 1.3.0, include_extended has to be set to true so we get the program name for a job. entries = jobs.fetch_multiple(job_id=job_id_temp, include_extended=True) while ((entries is None or len(entries) == 0) and duration <= timeout): @@ -276,25 +281,17 @@ def _get_job_status(job_id="*", owner="*", job_name="*", dd_name=None, dd_scan=T job["system"] = "" job["owner"] = entry.owner - job["ret_code"] = dict() - # From v1.3.0, ZOAU sets unavailable job fields as None, instead of '?'. - # This new way of constructing msg allows for a better empty message. - # "" instead of "None None". - job["ret_code"]["msg"] = "{0} {1}".format( - entry.status if entry.status else "", - entry.return_code if entry.return_code else "" - ).strip() - + job["ret_code"] = {} + job["ret_code"]["msg"] = entry.status job["ret_code"]["msg_code"] = entry.return_code job["ret_code"]["code"] = None if entry.return_code and len(entry.return_code) > 0: if entry.return_code.isdigit(): job["ret_code"]["code"] = int(entry.return_code) - job["ret_code"]["msg_text"] = entry.status if entry.status else "?" + job["ret_code"]["msg_txt"] = entry.status - # Beginning in ZOAU v1.3.0, the Job class changes svc_class to - # service_class. + # Beginning in ZOAU v1.3.0, the Job class changes svc_class to service_class. 
job["svc_class"] = entry.service_class job["job_class"] = entry.job_class job["priority"] = entry.priority @@ -310,16 +307,45 @@ def _get_job_status(job_id="*", owner="*", job_name="*", dd_name=None, dd_scan=T job["duration"] = duration if dd_scan: - list_of_dds = jobs.list_dds(entry.job_id) - while ((list_of_dds is None or len(list_of_dds) == 0) and duration <= timeout): + # If true, it means the job is not ready for DD queries and the duration and + # timeout should apply here instructing the user to add more time + is_dd_query_exception = False + is_jesjcl = False + list_of_dds = [] + + try: + list_of_dds = jobs.list_dds(entry.job_id) + except exceptions.DDQueryException as err: + if 'BGYSC5201E' in str(err): + is_dd_query_exception = True + pass + + # Check if the Job has JESJCL, if not, its in the JES INPUT queue, thus wait the full wait_time_s. + # Idea here is to force a TYPRUN{HOLD|JCLHOLD|COPY} job to go the full wait duration since we have + # currently no way to detect them, but if we know the job is one of the JOB_ERROR_STATUS lets + # exit the wait time supplied as we know it is a job failure. 
+ is_jesjcl = True if search_dictionaries("dd_name", "JESJCL", list_of_dds) else False + is_job_error_status = True if entry.status in JOB_ERROR_STATUSES else False + + while ((list_of_dds is None or len(list_of_dds) == 0 or is_dd_query_exception) and + (not is_jesjcl and not is_job_error_status and duration <= timeout)): current_time = timer() duration = round(current_time - start_time) sleep(1) - list_of_dds = jobs.list_dds(entry.job_id) + try: + # Note, in the event of an exception, eg job has TYPRUN=HOLD + # list_of_dds will still be populated with valuable content + list_of_dds = jobs.list_dds(entry.job_id) + is_jesjcl = True if search_dictionaries("dd_name", "JESJCL", list_of_dds) else False + is_job_error_status = True if entry.status in JOB_ERROR_STATUSES else False + except exceptions.DDQueryException as err: + if 'BGYSC5201E' in str(err): + is_dd_query_exception = True + continue job["duration"] = duration - for single_dd in list_of_dds: + dd = {} if "dd_name" not in single_dd: @@ -360,23 +386,24 @@ def _get_job_status(job_id="*", owner="*", job_name="*", dd_name=None, dd_scan=T tmpcont = None if "step_name" in single_dd: if "dd_name" in single_dd: - # In case ZOAU fails when reading the job output, we'll - # add a message to the user telling them of this. - # ZOAU cannot read partial output from a job, so we - # have to make do with nothing from this step if it fails. + # In case ZOAU fails when reading the job output, we'll add a + # message to the user telling them of this. ZOAU cannot read + # partial output from a job, so we have to make do with nothing + # from this step if it fails. try: tmpcont = jobs.read_output( entry.job_id, single_dd["step_name"], single_dd["dd_name"] ) - except (UnicodeDecodeError, decoder.JSONDecodeError): + except (UnicodeDecodeError, JSONDecodeError, TypeError, KeyError) as e: tmpcont = ( "Non-printable UTF-8 characters were present in this output. " - "Please access it manually." + "Please access it from the job log." 
) dd["content"] = tmpcont.split("\n") + job["ret_code"]["steps"].extend(_parse_steps(tmpcont)) job["ddnames"].append(dd) @@ -397,16 +424,6 @@ def _get_job_status(job_id="*", owner="*", job_name="*", dd_name=None, dd_scan=T job["subsystem"] = (tmptext.split("\n")[ 0]).replace(" ", "") - # Extract similar: "19.49.44 JOB06848 IEFC452I DOCEASYT - JOB NOT RUN - JCL ERROR 029 " - # then further reduce down to: 'JCL ERROR 029' - if job["ret_code"]["msg_code"] == "?": - if "JOB NOT RUN -" in tmpcont: - tmptext = tmpcont.split( - "JOB NOT RUN -")[1].split("\n")[0] - job["ret_code"]["msg"] = tmptext.strip() - job["ret_code"]["msg_code"] = None - job["ret_code"]["code"] = None - final_entries.append(job) if not final_entries: final_entries = _job_not_found(job_id, owner, job_name, "unavailable") @@ -439,3 +456,25 @@ def _ddname_pattern(contents, resolve_dependencies): ) ) return str(contents) + + +def search_dictionaries(key, value, list_of_dictionaries): + """ Searches a list of dictionaries given key and returns + the value dictionary. + + Arguments: + key {str} -- dictionary key to search for. + value {str} -- value to match for the dictionary key + list {str} -- list of dictionaries + + Returns: + dictionary -- dictionary matching the key and value + + Raises: + TypeError -- When input is not a list of dictionaries + """ + if not isinstance(list_of_dictionaries, list): + raise TypeError( + "Unsupported type for 'list_of_dictionaries', must be a list of dictionaries") + + return [element for element in list_of_dictionaries if element[key] == value] diff --git a/plugins/modules/zos_copy.py b/plugins/modules/zos_copy.py index 6991c4d81..9acb3c1c6 100644 --- a/plugins/modules/zos_copy.py +++ b/plugins/modules/zos_copy.py @@ -94,6 +94,10 @@ - C(dest) can be a USS file, directory or MVS data set name. - If C(dest) has missing parent directories, they will be created. - If C(dest) is a nonexistent USS file, it will be created. 
+ - If C(dest) is a new USS file or replacement, the file will be appropriately tagged with + either the system's default locale or the encoding option defined. If the USS file is + a replacement, the user must have write authority to the file either through ownership, + group or other permissions, else the module will fail. - If C(dest) is a nonexistent data set, it will be created following the process outlined here and in the C(volume) option. - If C(dest) is a nonexistent data set, the attributes assigned will depend on the type of @@ -467,15 +471,16 @@ - VSAM data sets can only be copied to other VSAM data sets. - For supported character sets used to encode data, refer to the L(documentation,https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/resources/character_set.html). - - L(zos_copy,./zos_copy.html) uses SFTP (Secure File Transfer Protocol) for the underlying - transfer protocol; Co:Z SFTP is not supported. In the case of Co:z SFTP, - you can exempt the Ansible userid on z/OS from using Co:Z thus falling back - to using standard SFTP. - - Beginning in version 1.8.x, zos_copy will no longer attempt to autocorrect a copy of a data type member - into a PDSE that contains program objects. You can control this behavior using module option - executable that will signify an executable is being copied into a PDSE with other - executables. Mixing data type members with program objects will be responded with a - (FSUM8976,./zos_copy.html) error. + - This module uses SFTP (Secure File Transfer Protocol) for the underlying + transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the + case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling + back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for + transfers, if not available, the module will fail. 
+ - Beginning in version 1.8.x, zos_copy will no longer attempt to correct a copy of + a data type member into a PDSE that contains program objects. You can control this + behavior using module option C(executable) that will signify an executable is being + copied into a PDSE with other executables. Mixing data type members with program + objects will result in a (FSUM8976,./zos_copy.html) error. seealso: - module: zos_fetch - module: zos_data_set diff --git a/plugins/modules/zos_fetch.py b/plugins/modules/zos_fetch.py index dc4bc8071..cc26b622b 100644 --- a/plugins/modules/zos_fetch.py +++ b/plugins/modules/zos_fetch.py @@ -146,10 +146,11 @@ - Fetching HFS or ZFS type data sets is currently not supported. - For supported character sets used to encode data, refer to the L(documentation,https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/resources/character_set.html). - - L(zos_fetch,./zos_fetch.html) uses SFTP (Secure File Transfer Protocol) for the underlying - transfer protocol; Co:Z SFTP is not supported. In the case of Co:z SFTP, - you can exempt the Ansible userid on z/OS from using Co:Z thus falling back - to using standard SFTP. + - This module uses SFTP (Secure File Transfer Protocol) for the underlying + transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the + case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling + back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for + transfers, if not available, the module will fail. 
seealso: - module: zos_data_set - module: zos_copy diff --git a/plugins/modules/zos_job_submit.py b/plugins/modules/zos_job_submit.py index 1fd5030b5..7c66c2543 100644 --- a/plugins/modules/zos_job_submit.py +++ b/plugins/modules/zos_job_submit.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright (c) IBM Corporation 2019 - 2024 +# Copyright (c) IBM Corporation 2019, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -25,9 +25,8 @@ - "Demetrios Dimatos (@ddimatos)" short_description: Submit JCL description: - - Submit JCL from a data set, USS, or from the controller. - - Submit a job and optionally monitor for completion. - - Optionally, wait a designated time until the job finishes. + - Submit JCL in a data set, USS file, or file on the controller. + - Submit a job and monitor for completion. - For an uncataloged dataset, specify the volume serial number. version_added: "1.0.0" options: @@ -126,6 +125,13 @@ notes: - For supported character sets used to encode data, refer to the L(documentation,https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/resources/character_set.html). + - This module uses L(zos_copy,./zos_copy.html) to copy local scripts to + the remote machine which uses SFTP (Secure File Transfer Protocol) for the + underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not + supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS + from using Co:Z thus falling back to using standard SFTP. If the module detects + SCP, it will temporarily use SFTP for transfers, if not available, the module + will fail. """ RETURN = r""" @@ -217,28 +223,40 @@ contains: msg: description: - Return code resulting from the job submission. Jobs that take - longer to assign a value can have a value of '?'. + - Job status resulting from the job submission. 
+ - Job status `ABEND` indicates the job ended abnormally.
+ - Job status `AC` indicates the job is active, often a started task or job taking long.
+ - Job status `CAB` indicates a converter abend.
+ - Job status `CANCELED` indicates the job was canceled.
+ - Job status `CNV` indicates a converter error.
+ - Job status `FLU` indicates the job was flushed.
+ - Job status `JCLERR` or `JCL ERROR` indicates the JCL has an error.
+ - Job status `SEC` or `SEC ERROR` indicates the job has encountered a security error.
+ - Job status `SYS` indicates a system failure.
+ - Job status `?` indicates status can not be determined.
+ - Jobs where status can not be determined will result in None (NULL).
 type: str
- sample: CC 0000
+ sample: AC
 msg_code:
 description:
- - Return code extracted from the `msg` so that it can be evaluated
- as a string. Jobs that take longer to assign a value can have a
- value of '?'.
+ - The return code from the submitted job as a string.
+ - Jobs which have no return code will result in None (NULL), such
+ is the case of a job that errors or is active.
 type: str
 sample: 0000
 msg_txt:
 description:
- - Returns additional information related to the job. Jobs that take
- longer to assign a value can have a value of '?'.
+ - Returns additional information related to the submitted job.
+ - Jobs which have no additional information will result in None (NULL).
 type: str
- sample: The job completion code (CC) was not available in the job
- output, please review the job log."
+ sample: The job JOB00551 was run with special job processing TYPRUN=SCAN.
+ This will result in no completion, return code or job steps and
+ changed will be false.
 code:
 description:
- - Return code converted to an integer value (when possible).
- - For JCL ERRORs, this will be None.
+ - The return code converted to an integer value when available.
+ - Jobs which have no return code will result in None (NULL), such
+ is the case of a job that errors or is active.
type: int sample: 0 steps: @@ -537,15 +555,10 @@ "system": "STL1" } ] -message: - description: This option is being deprecated - returned: success - type: str - sample: Submit JCL operation succeeded. """ EXAMPLES = r""" -- name: Submit JCL in a PDSE member +- name: Submit JCL in a PDSE member. zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) location: DATA_SET @@ -597,7 +610,7 @@ BetterArgParser, ) from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.job import ( - job_output, + job_output, search_dictionaries, JOB_ERROR_STATUSES ) from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.import_handler import ( ZOAUImportError, @@ -627,8 +640,10 @@ jobs = ZOAUImportError(traceback.format_exc()) -JOB_COMPLETION_MESSAGES = frozenset(["CC", "ABEND", "SEC ERROR", "JCL ERROR", "JCLERR"]) -JOB_ERROR_MESSAGES = frozenset(["ABEND", "SEC ERROR", "SEC", "JCL ERROR", "JCLERR"]) +JOB_STATUSES = list(dict.fromkeys(JOB_ERROR_STATUSES)) +JOB_STATUSES.append("CC") + +JOB_SPECIAL_PROCESSING = frozenset(["TYPRUN"]) MAX_WAIT_TIME_S = 86400 @@ -693,23 +708,39 @@ def submit_src_jcl(module, src, src_name=None, timeout=0, is_unix=True, volume=N # which is what ZOAU sends back, opitonally we can check the 'status' as # that is sent back as `AC` when the job is not complete but the problem # with monitoring 'AC' is that STARTED tasks never exit the AC status. 
+ job_fetched = None + job_fetch_rc = None + job_fetch_status = None + if job_submitted: - job_fetch_rc = jobs.fetch_multiple(job_submitted.job_id)[0].return_code - job_fetch_status = jobs.fetch_multiple(job_submitted.job_id)[0].status + try: + job_fetched = jobs.fetch_multiple(job_submitted.job_id)[0] + job_fetch_rc = job_fetched.return_code + job_fetch_status = job_fetched.status + except zoau_exceptions.JobFetchException: + pass # Before moving forward lets ensure our job has completed but if we see - # status that matches one in JOB_ERROR_MESSAGES, don't wait, let the code - # drop through and get analyzed in the main as it will scan the job ouput. - # Any match to JOB_ERROR_MESSAGES ends our processing and wait times. - while (job_fetch_status not in JOB_ERROR_MESSAGES and + # status that matches one in JOB_STATUSES, don't wait, let the code + # drop through and get analyzed in the main as it will scan the job ouput + # Any match to JOB_STATUSES ends our processing and wait times + while (job_fetch_status not in JOB_STATUSES and job_fetch_status == 'AC' and ((job_fetch_rc is None or len(job_fetch_rc) == 0 or job_fetch_rc == '?') and duration < timeout)): current_time = timer() duration = round(current_time - start_time) sleep(1) - job_fetch_rc = jobs.fetch_multiple(job_submitted.job_id)[0].return_code - job_fetch_status = jobs.fetch_multiple(job_submitted.job_id)[0].status + try: + job_fetched = jobs.fetch_multiple(job_submitted.job_id)[0] + job_fetch_rc = job_fetched.return_code + job_fetch_status = job_fetched.status + # Allow for jobs that need more time to be fectched to run the wait_time_s + except zoau_exceptions.JobFetchException as err: + if duration >= timeout: + raise err + else: + continue # ZOAU throws a JobSubmitException when the job sumbission fails thus there is no # JCL RC to share with the user, if there is a RC, that will be processed @@ -736,11 +767,12 @@ def submit_src_jcl(module, src, src_name=None, timeout=0, is_unix=True, volume=N 
result["stderr"] = to_text(err) result["duration"] = duration result["job_id"] = job_submitted.job_id + _msg_detail = "the job with status {0}".format(job_fetch_status) if job_fetch_status else "its status" result["msg"] = ("The JCL has been submitted {0} with ID {1} but there was an " - "error while fetching its status within the allocated time of {2} " + "error while fetching {2} within the allocated time of {3} " "seconds. Consider using module zos_job_query to poll for the " "job for more information. Standard error may have additional " - "information.".format(src_name, job_submitted.job_id, str(timeout))) + "information.".format(src_name, job_submitted.job_id, _msg_detail, str(timeout))) module.fail_json(**result) # Between getting a job_submitted and the jobs.fetch_multiple(job_submitted.job_id)[0].return_code @@ -882,7 +914,7 @@ def run_module(): if wait_time_s <= 0 or wait_time_s > MAX_WAIT_TIME_S: result["failed"] = True - result["msg"] = ("The value for option `wait_time_s` is not valid, it must " + result["msg"] = ("The value for option 'wait_time_s' is not valid, it must " "be greater than 0 and less than {0}.".format(str(MAX_WAIT_TIME_S))) module.fail_json(**result) @@ -899,29 +931,39 @@ def run_module(): job_submitted_id, duration = submit_src_jcl( module, src, src_name=src, timeout=wait_time_s, is_unix=True) - try: - # Explictly pass None for the unused args else a default of '*' will be - # used and return undersirable results - job_output_txt = None + # Explictly pass None for the unused args else a default of '*' will be + # used and return undersirable results + job_output_txt = None + try: job_output_txt = job_output( job_id=job_submitted_id, owner=None, job_name=None, dd_name=None, dd_scan=return_output, duration=duration, timeout=wait_time_s, start_time=start_time) + # This is resolvig a bug where the duration coming from job_output is passed by value, duration + # being an immutable type can not be changed and must be returned or accessed 
from the job.py.
+ if job_output_txt is not None:
+ duration = job_output_txt[0].get("duration") if job_output_txt[0].get("duration") is not None else duration
+ result["duration"] = duration

 if duration >= wait_time_s:
 result["failed"] = True
 result["changed"] = False
+ _msg = ("The JCL submitted with job id {0} but appears to be a long "
+ "running job that exceeded its maximum wait time of {1} "
+ "second(s). Consider using module zos_job_query to poll for "
+ "a long running job or increase option 'wait_time_s' to a value "
+ "greater than {2}.".format(str(job_submitted_id), str(wait_time_s), str(duration)))
+ _msg_suffix = ("Consider using module zos_job_query to poll for "
+ "a long running job or increase option 'wait_time_s' to a value "
+ "greater than {0}.".format(str(duration)))
+
 if job_output_txt is not None:
 result["jobs"] = job_output_txt
- result["msg"] = (
- "The JCL submitted with job id {0} but appears to be a long "
- "running job that exceeded its maximum wait time of {1} "
- "second(s). Consider using module zos_job_query to poll for "
- "a long running job or increase option 'wait_times_s` to a value "
- "greater than {2}.".format(
- str(job_submitted_id), str(wait_time_s), str(duration)))
+ job_ret_code = job_output_txt[0].get("ret_code")
+ job_ret_code.update({"msg_txt": _msg_suffix})
+ result["msg"] = _msg
 module.exit_json(**result)

 # Job has submitted, the module changed the managed node
@@ -932,35 +974,76 @@ def run_module():

 job_ret_code = job_output_txt[0].get("ret_code")

 if job_ret_code:
- job_msg = job_ret_code.get("msg")
- job_code = job_ret_code.get("code")
-
- # retcode["msg"] should never be empty where a retcode["code"] can be None,
- # "msg" could be an ABEND which has no corresponding "code"
- if job_msg is None:
- _msg = ("Unable to find a 'msg' in the 'ret_code' dictionary, "
- "please review the job log.")
- result["stderr"] = _msg
- raise Exception(_msg)
+ job_ret_code_msg = job_ret_code.get("msg")
+ job_ret_code_code = job_ret_code.get("code")
+
job_ret_code_msg_code = job_ret_code.get("msg_code") if return_output is True and max_rc is not None: - is_changed = assert_valid_return_code(max_rc, job_code, job_ret_code) - - if re.search("^(?:{0})".format("|".join(JOB_COMPLETION_MESSAGES)), job_msg): - # If the job_msg doesn't have a CC, it is an improper completion (error/abend) - if re.search("^(?:CC)", job_msg) is None: - _msg = ("The job completion code (CC) was not in the job log. " - "Please review the error {0} and the job log.".format(job_msg)) - result["stderr"] = _msg + is_changed = assert_valid_return_code(max_rc, job_ret_code_code, job_ret_code, result) + + if job_ret_code_msg is not None: + if re.search("^(?:{0})".format("|".join(JOB_STATUSES)), job_ret_code_msg): + # If the job_ret_code_msg doesn't have a CC (completion code), the job failed. + if re.search("^(?:CC)", job_ret_code_msg) is None: + _msg = ("The job completion code (CC) was not in the job log. " + "please review the job log for status {0}.".format(job_ret_code_msg)) + result["stderr"] = _msg + job_ret_code.update({"msg_txt": _msg}) + raise Exception(_msg) + + if job_ret_code_code is None: + # If there is no job_ret_code_code (Job return code) it may NOT be an error, + # some jobs will never return have an RC, eg Jobs with TYPRUN=*, + # Started tasks (which are not supported) so further analyze the + # JESJCL DD to figure out if its a TYPRUN job + + job_dd_names = job_output_txt[0].get("ddnames") + jes_jcl_dd = search_dictionaries("ddname", "JESJCL", job_dd_names) + + # Its possible jobs don't have a JESJCL which are active and this would + # cause an index out of range error. + if not jes_jcl_dd: + _msg_detail = " for status {0}.".format(job_ret_code_msg) if job_ret_code_msg else "." 
+ _msg = ("The job return code was not available in the job log, " + "please review the job log{0}".format(_msg_detail)) + job_ret_code.update({"msg_txt": _msg}) raise Exception(_msg) - if job_code is None: - raise Exception("The job return code was not available in the job log, " - "please review the job log and error {0}.".format(job_msg)) - - if job_code != 0 and max_rc is None: - raise Exception("The job return code {0} was non-zero in the " - "job output, this job has failed.".format(str(job_code))) + jes_jcl_dd_content = jes_jcl_dd[0].get("content") + jes_jcl_dd_content_str = " ".join(jes_jcl_dd_content) + + # The regex can be r"({0})\s*=\s*(COPY|HOLD|JCLHOLD|SCAN)" once zoau support is in. + special_processing_keyword = re.search(r"({0})\s*=\s*(SCAN)" + .format("|".join(JOB_SPECIAL_PROCESSING)), jes_jcl_dd_content_str) + + if special_processing_keyword: + job_ret_code.update({"msg": special_processing_keyword[0]}) + job_ret_code.update({"code": None}) + job_ret_code.update({"msg_code": None}) + job_ret_code.update({"msg_txt": "The job {0} was run with special job " + "processing {1}. This will result in no completion, " + "return code or job steps and changed will be false." + .format(job_submitted_id, special_processing_keyword[0])}) + is_changed = False + else: + # The job_ret_code_code is None at this point, but the job_ret_code_msg_code could be populated + # so check both and provide a proper response. + + if job_ret_code_msg_code is None: + _msg_detail = " for status {0}.".format(job_ret_code_msg) if job_ret_code_msg else "." 
+ _msg = ("The job return code was not available in the job log, " + "please review the job log{0}".format(_msg_detail)) + job_ret_code.update({"msg_txt": _msg}) + raise Exception(_msg) + + # raise Exception("The job return code was not available in the job log, " + # "please review the job log and error {0}.".format(job_ret_code_msg)) + elif job_ret_code_code != 0 and max_rc is None: + _msg = ("The job return code {0} was non-zero in the " + "job output, this job has failed.".format(str(job_ret_code_code))) + job_ret_code.update({"msg_txt": _msg}) + result["stderr"] = _msg + raise Exception(_msg) if not return_output: for job in result.get("jobs", []): @@ -975,7 +1058,6 @@ def run_module(): result["stderr"] = _msg result["jobs"] = None raise Exception(_msg) - except Exception as err: result["failed"] = True result["changed"] = False @@ -995,27 +1077,32 @@ def run_module(): module.exit_json(**result) -def assert_valid_return_code(max_rc, job_rc, ret_code): +def assert_valid_return_code(max_rc, job_rc, ret_code, result): if job_rc is None: raise Exception( "The job return code (ret_code[code]) was not available in the jobs output, " "this job has failed.") if job_rc > max_rc: - raise Exception("The job return code, 'ret_code[code]' {0} for the submitted job is " - "greater than the value set for option 'max_rc' {1}. " - "Increase the value for 'max_rc' otherwise this job submission " - "has failed.".format(str(job_rc), str(max_rc))) + _msg = ("The job return code, 'ret_code[code]' {0} for the submitted job is " + "greater than the value set for option 'max_rc' {1}. 
" + "Increase the value for 'max_rc' otherwise this job submission " + "has failed.".format(str(job_rc), str(max_rc))) + ret_code.update({"msg_txt": _msg}) + result["stderr"] = _msg + raise Exception(_msg) for step in ret_code["steps"]: step_cc_rc = int(step["step_cc"]) step_name_for_rc = step["step_name"] if step_cc_rc > max_rc: - raise Exception("The step name {0} with return code {1} for the submitted job is " - "greater than the value set for option 'max_rc' {2}. " - "Increase the value for 'max_rc' otherwise this job submission " - "has failed.".format(step_name_for_rc, str(step_cc_rc), str(max_rc))) - + _msg = ("The step name {0} with return code {1} for the submitted job is " + "greater than the value set for option 'max_rc' {2}. " + "Increase the value for 'max_rc' otherwise this job submission " + "has failed.".format(step_name_for_rc, str(step_cc_rc), str(max_rc))) + ret_code.update({"msg_txt": _msg}) + result["stderr"] = _msg + raise Exception(_msg) # If there is NO exception rasied it means that max_rc is larger than the # actual RC from the submitted job. In this case, the ansible changed status # should NOT be 'changed=true' even though the user did override the return code, diff --git a/plugins/modules/zos_ping.py b/plugins/modules/zos_ping.py index 6de0cccf0..5f134cd90 100644 --- a/plugins/modules/zos_ping.py +++ b/plugins/modules/zos_ping.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright (c) IBM Corporation 2019 - 2024 +# Copyright (c) IBM Corporation 2019, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/plugins/modules/zos_ping.rexx b/plugins/modules/zos_ping.rexx index a881146b0..beca54c3b 100644 --- a/plugins/modules/zos_ping.rexx +++ b/plugins/modules/zos_ping.rexx @@ -85,7 +85,7 @@ If (rc <> 0 | returnCode <> HWTJ_OK) Then Do failModule(errmsg, "", retC) End -/* Check for Python version >= 3.8 eg: 'Python 3.10.0' */ +/* Check for Python version >= 3.10 eg: 'Python 3.10.0' */ retC = bpxwunix('python3 --version', out., err.) If (err.0 > 0) Then Do Do index=1 To err.0 diff --git a/plugins/modules/zos_script.py b/plugins/modules/zos_script.py index b69d70b2d..0677d187d 100644 --- a/plugins/modules/zos_script.py +++ b/plugins/modules/zos_script.py @@ -116,11 +116,12 @@ - For supported character sets used to encode data, refer to the L(documentation,https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/resources/character_set.html). - This module uses L(zos_copy,./zos_copy.html) to copy local scripts to - the remote machine. - - L(zos_copy,./zos_copy.html) uses SFTP (Secure File Transfer Protocol) - for the underlying transfer protocol; Co:Z SFTP is not supported. In - the case of Co:z SFTP, you can exempt the Ansible userid on z/OS from - using Co:Z thus falling back to using standard SFTP. + the remote machine which uses SFTP (Secure File Transfer Protocol) for the + underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not + supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS + from using Co:Z thus falling back to using standard SFTP. If the module detects + SCP, it will temporarily use SFTP for transfers, if not available, the module + will fail. - This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with L(zos_tso_command,./zos_tso_command.html). 
diff --git a/plugins/modules/zos_tso_command.py b/plugins/modules/zos_tso_command.py index 6c2cb6ef6..17e190fb2 100644 --- a/plugins/modules/zos_tso_command.py +++ b/plugins/modules/zos_tso_command.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright (c) IBM Corporation 2019 - 2024 +# Copyright (c) IBM Corporation 2019, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/plugins/modules/zos_unarchive.py b/plugins/modules/zos_unarchive.py index e9b17766c..aa315b3fb 100644 --- a/plugins/modules/zos_unarchive.py +++ b/plugins/modules/zos_unarchive.py @@ -29,8 +29,6 @@ - Supported sources are USS (UNIX System Services) or z/OS data sets. - Mixing MVS data sets with USS files for unarchiving is not supported. - The archive is sent to the remote as binary, so no encoding is performed. - - options: src: description: @@ -311,12 +309,17 @@ type: bool required: false default: false - notes: - VSAMs are not supported. - + - This module uses L(zos_copy,./zos_copy.html) to copy local scripts to + the remote machine which uses SFTP (Secure File Transfer Protocol) for the + underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not + supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS + from using Co:Z thus falling back to using standard SFTP. If the module detects + SCP, it will temporarily use SFTP for transfers, if not available, the module + will fail. 
seealso: - - module: zos_unarchive + - module: zos_archive ''' EXAMPLES = r''' diff --git a/tests/functional/modules/test_zos_job_query_func.py b/tests/functional/modules/test_zos_job_query_func.py index ee7b03157..8f6c6e072 100644 --- a/tests/functional/modules/test_zos_job_query_func.py +++ b/tests/functional/modules/test_zos_job_query_func.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) IBM Corporation 2019, 2020, 2023 +# Copyright (c) IBM Corporation 2019, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -57,7 +57,7 @@ def test_zos_job_id_query_multi_wildcards_func(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCLQ_FILE_CONTENTS), TEMP_PATH) ) hosts.all.zos_data_set( - name=JDATA_SET_NAME, state="present", type="pds", replace=True + name=JDATA_SET_NAME, state="present", type="PDS", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, JDATA_SET_NAME) @@ -90,7 +90,7 @@ def test_zos_job_name_query_multi_wildcards_func(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCLQ_FILE_CONTENTS), TEMP_PATH) ) hosts.all.zos_data_set( - name=NDATA_SET_NAME, state="present", type="pds", replace=True + name=NDATA_SET_NAME, state="present", type="PDS", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, NDATA_SET_NAME) diff --git a/tests/functional/modules/test_zos_job_submit_func.py b/tests/functional/modules/test_zos_job_submit_func.py index 394a087ad..bae4dbb36 100644 --- a/tests/functional/modules/test_zos_job_submit_func.py +++ b/tests/functional/modules/test_zos_job_submit_func.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) IBM Corporation 2019 - 2024 +# Copyright (c) IBM Corporation 2019, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with 
the License. # You may obtain a copy of the License at @@ -163,7 +163,7 @@ //****************************************************************************** //* Job containing a non existent DSN that will force an error. //* Returns: -//* ret_code->(code=null, msg=JCLERR ?, msg_text=JCLERR, msg_code=?) +//* ret_code->(code=null, msg=JCLERR, msg_txt=JCLERR, msg_code=None) //* msg --> The JCL submitted with job id JOB00532 but there was an error, //* please review the error for further details: The job completion //* code (CC) was not in the job log. Please review the error @@ -198,7 +198,7 @@ //* Another job containing no job card resulting in a JCLERROR with an value. It //* won't always be 952, it will increment. //* Returns: -//* ret_code->(code=null, msg=JCL ERROR 952, msg_text=JCLERR, msg_code=null) +//* ret_code->(code=null, msg=JCLERR, msg_text=JCLERR, msg_code=null) //* msg --> The JCL submitted with job id JOB00728 but there was an error, //* please review the error for further details: The job completion //* code (CC) was not in the job log. Please review the error @@ -214,11 +214,11 @@ //* Job containing a USER=FOOBAR that will cause JES to return a SEC ERROR which //* is a security error. //* Returns: -//* ret_code->(code=null, msg=SEC ?, msg_text=SEC, msg_code=?) -//* msg --> The JCL submitted with job id JOB00464 but there was an error, +//* ret_code->(code=None, msg=SEC, msg_txt=, msg_code=?) +//* msg --> The JCL submitted with job id JOB01062 but there was an error, //* please review the error for further details: The job return code -//* was not available in the job log, please review the job log -//* and error SEC ?.", +//* was not available in the job log, please review the job log and +//* status SEC. 
//****************************************************************************** //INVUSER JOB (T043JM,JM00,1,0,0,0),'HELLO WORLD - JRM',CLASS=R, // MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM,USER=FOOBAR @@ -234,22 +234,102 @@ JCL_FILE_CONTENTS_TYPRUN_SCAN = """//* //****************************************************************************** -//* Job containing a TYPRUN=SCAN that will cause JES to run a syntax check and -//* not actually run the JCL. +//* Job containing a TYPRUN=SCAN will cause JES to run a syntax check and +//* not actually run the JCL. The job will be put on the H output queue, DDs +//* JESJCL and JESMSGLG are available. Ansible considers this a passing job. //* Returns: -//* ret_code->(code=null, msg=? ?, msg_text=?, msg_code=?) -//* msg --> The JCL submitted with job id JOB00620 but there was an error, -//* please review the error for further details: The job return code -//* was not available in the job log, please review the job log -//* and error ? ?.", +//* ret_code->(code=null, msg=TYPRUN=SCAN, msg_txt=, msg_code=null) +//* msg --> The job JOB00551 was run with special job processing TYPRUN=SCAN. +//* This will result in no completion, return code or job steps and +//* changed will be false." //****************************************************************************** -//TYPESCAN JOB (T043JM,JM00,1,0,0,0),'HELLO WORLD - JRM',CLASS=R, -// MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM,TYPRUN=SCAN +//SCAN JOB (T043JM,JM00,1,0,0,0),'HELLO WORLD - JRM',CLASS=R, +// MSGCLASS=H,MSGLEVEL=1,NOTIFY=S0JM,TYPRUN=SCAN //STEP0001 EXEC PGM=IEBGENER //SYSIN DD DUMMY //SYSPRINT DD SYSOUT=* //SYSUT1 DD * -HELLO, WORLD +HELLO, WORLD. 
SCAN OPERATION +/* +//SYSUT2 DD SYSOUT=* +// +""" + +JCL_FILE_CONTENTS_TYPRUN_COPY = """//* +//****************************************************************************** +//* Job containing a TYPRUN=COPY will cause JES to copy the input job +//* (source content) stream directly to a sysout data set (device specified in +//* the message class parameter (H)) and schedule it for output processing, in +//* other words, the job will be put on the H output queue; DD's +//* JESMSGLG and JESJCLIN are available. Ansible considers this a failing job +//* given currently the jobs status can not be determined so it times out. +//* Returns: +//* ret_code->(code=None, msg=None, msg_txt=, msg_code=None) +//* msg --> The JCL submitted with job id JOB00555 but appears to be a long +//* running job that exceeded its maximum wait time of 10 second(s). +//* Consider using module zos_job_query to poll for a long running +//* job or increase option 'wait_times_s' to a value greater than 11. +//****************************************************************************** +//COPY JOB (T043JM,JM00,1,0,0,0),'HELLO WORLD - JRM',CLASS=R, +// MSGCLASS=H,MSGLEVEL=1,NOTIFY=S0JM,TYPRUN=COPY +//STEP0001 EXEC PGM=IEBGENER +//SYSIN DD DUMMY +//SYSPRINT DD SYSOUT=* +//SYSUT1 DD * +HELLO, WORLD. COPY OPERATION +/* +//SYSUT2 DD SYSOUT=* +// +""" + +JCL_FILE_CONTENTS_TYPRUN_HOLD = """//* +//****************************************************************************** +//* Job containing a TYPRUN=HOLD will cause JES to hold this JCL without +//* executing it until a special event occurs at which time, the operator will +//* release the job from HOLD and allow the job to continue processing. +//* Ansible considers this a failing job +//* given currently the jobs status can not be determined so it times out. 
+//* Returns: +//* ret_code->(code=None, msg=None, msg_txt=, msg_code=None) +//* msg --> The JCL submitted with job id JOB00555 but appears to be a long +//* running job that exceeded its maximum wait time of 10 second(s). +//* Consider using module zos_job_query to poll for a long running +//* job or increase option 'wait_times_s' to a value greater than 11. +//****************************************************************************** +//HOLD JOB (T043JM,JM00,1,0,0,0),'HELLO WORLD - JRM',CLASS=R, +// MSGCLASS=H,MSGLEVEL=1,NOTIFY=S0JM,TYPRUN=HOLD +//STEP0001 EXEC PGM=IEBGENER +//SYSIN DD DUMMY +//SYSPRINT DD SYSOUT=* +//SYSUT1 DD * +HELLO, WORLD. HOLD OPERATION +/* +//SYSUT2 DD SYSOUT=* +// +""" + +JCL_FILE_CONTENTS_TYPRUN_JCLHOLD = """//* +//****************************************************************************** +//* Job containing a TYPRUN=JCLHOLD will cause JES to will keep the submitted +//* job in the input queue until it's released by an operator or by the default +//* time assigned to the class parameter. As the operator you enter 'A' or 'R' +//* to release it from the queue. +//* Ansible considers this a failing job +//* given currently the jobs status can not be determined so it times out. +//* Returns: +//* ret_code->(code=None, msg=None, msg_txt=, msg_code=None) +//* msg --> The JCL submitted with job id JOB00555 but appears to be a long +//* running job that exceeded its maximum wait time of 10 second(s). +//* Consider using module zos_job_query to poll for a long running +//* job or increase option 'wait_times_s' to a value greater than 11. +//****************************************************************************** +//JCLHOLD JOB (T043JM,JM00,1,0,0,0),'HELLO WORLD - JRM',CLASS=R, +// MSGCLASS=H,MSGLEVEL=1,NOTIFY=S0JM,TYPRUN=JCLHOLD +//STEP0001 EXEC PGM=IEBGENER +//SYSIN DD DUMMY +//SYSPRINT DD SYSOUT=* +//SYSUT1 DD * +HELLO, WORLD. 
JCLHOLD OPERATION /* //SYSUT2 DD SYSOUT=* // @@ -342,9 +422,11 @@ def test_job_submit_PDS(ansible_zos_module, location): hosts.all.shell( cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) + hosts.all.zos_data_set( - name=data_set_name, state="present", type="pds", replace=True + name=data_set_name, state="present", type="PDS", replace=True ) + hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, data_set_name) ) @@ -362,8 +444,8 @@ def test_job_submit_PDS(ansible_zos_module, location): assert result.get("jobs")[0].get("ret_code").get("code") == 0 assert result.get("changed") is True finally: - hosts.all.file(path=TEMP_PATH, state="absent") - hosts.all.zos_data_set(name=data_set_name, state="absent") + hosts.all.file(path=TEMP_PATH, state="absent") + hosts.all.zos_data_set(name=data_set_name, state="absent") def test_job_submit_PDS_special_characters(ansible_zos_module): @@ -374,7 +456,7 @@ def test_job_submit_PDS_special_characters(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) hosts.all.zos_data_set( - name=DATA_SET_NAME_SPECIAL_CHARS, state="present", type="pds", replace=True + name=DATA_SET_NAME_SPECIAL_CHARS, state="present", type="PDS", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format( @@ -465,7 +547,7 @@ def test_job_submit_PDS_volume(ansible_zos_module, volumes_on_systems): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="pds", replace=True, volumes=volume_1 + name=data_set_name, state="present", type="PDS", replace=True, volumes=volume_1 ) hosts.all.shell( @@ -473,7 +555,7 @@ def test_job_submit_PDS_volume(ansible_zos_module, volumes_on_systems): ) hosts.all.zos_data_set( - name=data_set_name, state="uncataloged", type="pds" + name=data_set_name, state="uncataloged", type="PDS" ) results = hosts.all.zos_job_submit(src=data_set_name+"(SAMPLE)", location="DATA_SET", volume=volume_1) @@ -498,7 +580,7 @@ def 
test_job_submit_PDS_5_SEC_JOB_WAIT_15(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="pds", replace=True + name=data_set_name, state="present", type="PDS", replace=True ) hosts.all.shell( @@ -531,7 +613,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_60(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="pds", replace=True + name=data_set_name, state="present", type="PDS", replace=True ) hosts.all.shell( @@ -564,7 +646,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_10_negative(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="pds", replace=True + name=data_set_name, state="present", type="PDS", replace=True ) hosts.all.shell( @@ -734,43 +816,113 @@ def test_negative_job_submit_local_jcl_no_dsn(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_NO_DSN) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL") + results = hosts.all.zos_job_submit(src=tmp_file.name, wait_time_s=20, location="LOCAL") + import pprint for result in results.contacted.values(): - # Expecting: The job completion code (CC) was not in the job log....." assert result.get("changed") is False assert re.search(r'completion code', repr(result.get("msg"))) assert result.get("jobs")[0].get("job_id") is not None -# Should have a JCL ERROR def test_negative_job_submit_local_jcl_invalid_user(ansible_zos_module): tmp_file = tempfile.NamedTemporaryFile(delete=True) with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_INVALID_USER) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL") + for result in results.contacted.values(): - # Expecting: The job completion code (CC) was not in the job log....." 
assert result.get("changed") is False - assert re.search(r'return code was not available', repr(result.get("msg"))) - assert re.search(r'error SEC', repr(result.get("msg"))) + assert re.search(r'please review the error for further details', repr(result.get("msg"))) + assert re.search(r'please review the job log for status SEC', repr(result.get("msg"))) assert result.get("jobs")[0].get("job_id") is not None - assert re.search(r'SEC', repr(result.get("jobs")[0].get("ret_code").get("msg_text"))) + assert re.search(r'please review the job log for status SEC', repr(result.get("jobs")[0].get("ret_code").get("msg_txt"))) -def test_negative_job_submit_local_jcl_typrun_scan(ansible_zos_module): +def test_job_submit_local_jcl_typrun_scan(ansible_zos_module): tmp_file = tempfile.NamedTemporaryFile(delete=True) with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_TYPRUN_SCAN) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL") + results = hosts.all.zos_job_submit(src=tmp_file.name, + location="LOCAL", + wait_time_s=20, + encoding={ + "from": "UTF-8", + "to": "IBM-1047" + },) + for result in results.contacted.values(): + assert result.get("changed") is False + assert result.get("jobs")[0].get("job_id") is not None + assert re.search(r'run with special job processing TYPRUN=SCAN', repr(result.get("jobs")[0].get("ret_code").get("msg_txt"))) + assert result.get("jobs")[0].get("ret_code").get("code") is None + assert result.get("jobs")[0].get("ret_code").get("msg") == "TYPRUN=SCAN" + assert result.get("jobs")[0].get("ret_code").get("msg_code") is None + + +def test_job_submit_local_jcl_typrun_copy(ansible_zos_module): + tmp_file = tempfile.NamedTemporaryFile(delete=True) + with open(tmp_file.name, "w") as f: + f.write(JCL_FILE_CONTENTS_TYPRUN_COPY) + hosts = ansible_zos_module + results = hosts.all.zos_job_submit(src=tmp_file.name, + location="LOCAL", + wait_time_s=20, + encoding={ + "from": "UTF-8", + "to": "IBM-1047" + },) 
+ import pprint + for result in results.contacted.values(): + pprint.pprint(result) + assert result.get("changed") is False + assert result.get("jobs")[0].get("job_id") is not None + assert re.search(r'please review the job log', repr(result.get("jobs")[0].get("ret_code").get("msg_txt"))) + assert result.get("jobs")[0].get("ret_code").get("code") is None + assert result.get("jobs")[0].get("ret_code").get("msg") is None + assert result.get("jobs")[0].get("ret_code").get("msg_code") is None + + +def test_job_submit_local_jcl_typrun_hold(ansible_zos_module): + tmp_file = tempfile.NamedTemporaryFile(delete=True) + with open(tmp_file.name, "w") as f: + f.write(JCL_FILE_CONTENTS_TYPRUN_HOLD) + hosts = ansible_zos_module + results = hosts.all.zos_job_submit(src=tmp_file.name, + location="LOCAL", + wait_time_s=20, + encoding={ + "from": "UTF-8", + "to": "IBM-1047" + },) for result in results.contacted.values(): - # Expecting: The job completion code (CC) was not in the job log....." assert result.get("changed") is False - assert re.search(r'return code was not available', repr(result.get("msg"))) - assert re.search(r'error ? ?', repr(result.get("msg"))) assert result.get("jobs")[0].get("job_id") is not None - assert result.get("jobs")[0].get("ret_code").get("msg_text") == "?" 
+ assert re.search(r'long running job', repr(result.get("jobs")[0].get("ret_code").get("msg_txt"))) + assert result.get("jobs")[0].get("ret_code").get("code") is None + assert result.get("jobs")[0].get("ret_code").get("msg") == "AC" + assert result.get("jobs")[0].get("ret_code").get("msg_code") is None + + +def test_job_submit_local_jcl_typrun_jclhold(ansible_zos_module): + tmp_file = tempfile.NamedTemporaryFile(delete=True) + with open(tmp_file.name, "w") as f: + f.write(JCL_FILE_CONTENTS_TYPRUN_JCLHOLD) + hosts = ansible_zos_module + results = hosts.all.zos_job_submit(src=tmp_file.name, + location="LOCAL", + wait_time_s=20, + encoding={ + "from": "UTF-8", + "to": "IBM-1047" + },) + for result in results.contacted.values(): + assert result.get("changed") is False + assert result.get("jobs")[0].get("job_id") is not None + assert re.search(r'long running job', repr(result.get("jobs")[0].get("ret_code").get("msg_txt"))) + assert result.get("jobs")[0].get("ret_code").get("code") is None + assert result.get("jobs")[0].get("ret_code").get("msg") == "AC" + assert result.get("jobs")[0].get("ret_code").get("msg_code") is None # This test case is related to the following GitHub issues: @@ -807,4 +959,4 @@ def test_zoau_bugfix_invalid_utf8_chars(ansible_zos_module): assert result.get("jobs")[0].get("ret_code").get("code") == 0 assert result.get("changed") is True finally: - hosts.all.file(path=TEMP_PATH, state="absent") + hosts.all.file(path=TEMP_PATH, state="absent") \ No newline at end of file diff --git a/tests/unit/test_zoau_version_checker_unit.py b/tests/unit/test_zoau_version_checker_unit.py index 96031f4a1..15bcce58b 100644 --- a/tests/unit/test_zoau_version_checker_unit.py +++ b/tests/unit/test_zoau_version_checker_unit.py @@ -45,10 +45,24 @@ (['1','2','1'], "2022/08/17 21:25:13 CUT V1.2.1"), (['1','2','1'], "2022/08/25 21:44:21 CUT V1.2.1 31163ab 1856"), (['1','2','1'], "2022/09/07 15:26:50 CUT V1.2.1 d2f6557 1880"), + (['1','2','1','1'], ""), (['1','2','3'], 
"2022/12/03 13:33:22 CUT V1.2.3 6113dc9 2512"), (['1','2','2'], "2022/12/06 20:44:00 CUT V1.2.2 ee30137 2525"), (['1','2','3'], "2023/03/16 18:17:00 CUT V1.2.3 1aa591fb 2148 PH50145"), - (['1', '2', '4', '0'], "2023/06/02 13:28:30 CUT V1.2.4.0 3b866824 2873 PH52034 826 267d9646"), + (['1','2','3','1'], ""), + (['1','2','3','2'], ""), + (['1','2','4','0'], "2023/06/02 13:28:30 CUT V1.2.4.0 3b866824 2873 PH52034 826 267d9646"), + (['1','2','4','1'], ""), + (['1','2','4','2'], ""), + (['1','2','4','3'], ""), + (['1','2','4','4'], ""), + (['1','2','4','5'], ""), + (['1','2','5','0'], ""), + (['1','2','5','1'], ""), + (['1','2','5','2'], ""), + (['1','2','5','3'], ""), + (['1','2','5','4'], ""), + (['1','2','5','6'], ""), ] From 5f743e6df0c97378c1215c10950143108c2fff21 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 2 Apr 2024 17:36:46 -0600 Subject: [PATCH 10/28] Enabler/add ansible sanity action (#1313) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Create bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Update bandit.yml * Added changelog action * Update changelog.yml * Create close-stale-issues * Update close-stale-issues Quite el workflow dispatch * Create bandit2.yml * Update bandit2.yml * Update zos_copy.py * Update zos_copy.py Me equivoque * Create ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Update ansible-test.yml * Added ac changelog * added lint as an option * Added documentation to ac_changelog * Changed 'lint' to 'command' on ac_changelog * Create * Create first version of the changelog action * Update changelog.yml * Fix changelog.yml * Change name of action Antsibull 'Changelog lint' to AC Changelog lint * Rename 'changelog.yml' to 'ac_changelog.yml * Create ac_changelog.yml * Update 
ac_changelog.yml * Update ac_changelog.yml * Update ac_changelog.yml * Change path in 'venv setup' on ac * Change ac_changelog.yml * Change ac_changelog.yml * Change ac_changelog.yml * Change ac_changelog.yml * Removed not required github actions * Update zos_copy.py * Update ac_changelog.yml * Create 'ac-ansible-test.yml' * Test * Delete test changelog * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix ac ansible sanity * Fix paths * Delete commented lines * Delete weird changes * Delete weird changes * Update ac-ansible-test-sanity.yml --------- Co-authored-by: Fernando Flores Co-authored-by: André Marcel Gutiérrez Benítez <68956970+AndreMarcel99@users.noreply.github.com> --- .github/workflows/ac-ansible-test-sanity.yml | 71 ++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 .github/workflows/ac-ansible-test-sanity.yml diff --git a/.github/workflows/ac-ansible-test-sanity.yml b/.github/workflows/ac-ansible-test-sanity.yml new file mode 100644 index 000000000..1354195a5 --- /dev/null +++ b/.github/workflows/ac-ansible-test-sanity.yml @@ -0,0 +1,71 @@ +name: AC Ansible sanity + +on: + pull_request: + branches: + - dev + - staging* + paths-ignore: + - '**.tar.gz' + - 'pycache/**' + - '.ansible-lint' + - 'cache/**' + - '.DS_Store' + - '.git/**' + - '.github/**' + - '.gitignore' + - '.python-version' + - '.pytest_cache/**' + - '.vscode/**' + - 'Jenkinsfile' + - 'ac' + - 'ansible.cfg' + - 'changelogs/**' + - 'collections/**' + - 'docs/**' + - 'scripts/**' + - 'test_config.yml' + - 'tests/*.ini' + - 'tests/*.py' + - 'tests/.pytest_cache' + - 'tests/pycache' + - 'tests/functional' + - 'tests/helpers' + - 'tests/requirements.txt' + - 'tests/unit' + - 'tests/sanity/ignore-*' + - 'venv*' + +jobs: + ansible-sanity: + runs-on: ubuntu-latest + env: + branch: ${{ 
github.event.pull_request.head.ref }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.11 + + - name: Set up venv + run: | + python -m pip install --upgrade pip + pip install virtualenv + mkdir venv + virtualenv venv/venv-2.16 + + - name: Install dependencies + run: | + source venv/venv-2.16/bin/activate + python -m pip install --upgrade pip + pip install ansible + + - name: Run ac-sanity + run: | + source venv/venv-2.16/bin/activate + ./ac --ac-build + ./ac --ac-sanity From 3d248c42e09bfb45d0c50938236b50378ed07256 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= <68956970+AndreMarcel99@users.noreply.github.com> Date: Tue, 2 Apr 2024 17:38:18 -0600 Subject: [PATCH 11/28] [Bugfix][1201]Zos_mvs_raw_ignores_tmp_hlq (#1320) * Add first iteration * Fix mvs_raw * Add another format * Add define * Add parms to avoid fails * Quick fix to not avoid tmphlq * Fix sanity issues * Fix white spaces * Return call of hlq * Add fragment * Fix capital letters * Change fragment * Fix case sensitive data set * Fix not exist dataset * Return dataset * Fix upper case for latest dataset and change of datasize from dtouch * Fix upper case and lower case * Change typo * Fix documentation * Fix not match * Unit testing to uppercase * Fis uppercases in mvs raw * Add uppercase * New problem ID * Remove unnecesary function and add KSDS solution --- .../1320-Zos_mvs_raw_ignores_tmp_hlq.yml | 5 + plugins/module_utils/zos_mvs_raw.py | 6 +- plugins/modules/zos_mvs_raw.py | 260 ++++++++---------- .../modules/test_zos_mvs_raw_func.py | 88 +++--- tests/unit/test_zos_mvs_raw_unit.py | 80 +++--- 5 files changed, 210 insertions(+), 229 deletions(-) create mode 100644 changelogs/fragments/1320-Zos_mvs_raw_ignores_tmp_hlq.yml diff --git a/changelogs/fragments/1320-Zos_mvs_raw_ignores_tmp_hlq.yml b/changelogs/fragments/1320-Zos_mvs_raw_ignores_tmp_hlq.yml 
new file mode 100644 index 000000000..058faf66e --- /dev/null +++ b/changelogs/fragments/1320-Zos_mvs_raw_ignores_tmp_hlq.yml @@ -0,0 +1,5 @@ +bugfixes: + - zos_mvs_raw - The module ignored the value of `tmp_hlq` option when creating temporary data sets. + Fix now honors the value if provided and uses it as High Level Qualifier for temporary data sets created + during the module execution. + (https://github.com/ansible-collections/ibm_zos_core/pull/1320). \ No newline at end of file diff --git a/plugins/module_utils/zos_mvs_raw.py b/plugins/module_utils/zos_mvs_raw.py index 7c2badf84..466775939 100644 --- a/plugins/module_utils/zos_mvs_raw.py +++ b/plugins/module_utils/zos_mvs_raw.py @@ -24,7 +24,7 @@ class MVSCmd(object): """ @staticmethod - def execute(pgm, dds, parm="", debug=False, verbose=False): + def execute(pgm, dds, parm="", debug=False, verbose=False, tmp_hlq=None): """Execute an unauthorized MVS command. Args: @@ -36,9 +36,10 @@ def execute(pgm, dds, parm="", debug=False, verbose=False): MVSCmdResponse: The response of the command. """ module = AnsibleModuleHelper(argument_spec={}) - command = "mvscmd {0} {1} {2} ".format( + command = "mvscmd {0} {1} {2} {3}".format( "-d" if debug else "", "-v" if verbose else "", + "--tmphlq={0}".format(tmp_hlq.upper()) if tmp_hlq else "", MVSCmd._build_command(pgm, dds, parm), ) rc, out, err = module.run_command(command) @@ -64,7 +65,6 @@ def execute_authorized(pgm, dds, parm="", debug=False, verbose=False, tmp_hlq=No "--tmphlq={0}".format(tmp_hlq.upper()) if tmp_hlq else "", MVSCmd._build_command(pgm, dds, parm), ) - rc, out, err = module.run_command(command) return MVSCmdResponse(rc, out, err) diff --git a/plugins/modules/zos_mvs_raw.py b/plugins/modules/zos_mvs_raw.py index 502d2ead7..a440c31c6 100644 --- a/plugins/modules/zos_mvs_raw.py +++ b/plugins/modules/zos_mvs_raw.py @@ -96,16 +96,16 @@ - Maps to DSNTYPE on z/OS. 
type: str choices: - - library - - pds - - pdse - - large - - basic - - seq - - rrds - - esds - - lds - - ksds + - LIBRARY + - PDS + - PDSE + - LARGE + - BASIC + - SEQ + - RRDS + - ESDS + - LDS + - KSDS disposition: description: - I(disposition) indicates the status of a data set. @@ -174,12 +174,12 @@ using I(space_primary) and I(space_secondary). type: str choices: - - trk - - cyl - - b - - k - - m - - g + - TRK + - CYL + - B + - K + - M + - G space_primary: description: - The primary amount of space to allocate for a new data set. @@ -325,11 +325,11 @@ - The format and characteristics of the records for new data set. type: str choices: - - u - - vb - - vba - - fb - - fba + - U + - VB + - VBA + - FB + - FBA return_content: description: - Determines how content should be returned to the user. @@ -505,11 +505,11 @@ a UNIX file would normally be treated as a stream of bytes. type: str choices: - - u - - vb - - vba - - fb - - fba + - U + - VB + - VBA + - FB + - FBA return_content: description: - Determines how content should be returned to the user. @@ -717,16 +717,16 @@ - Maps to DSNTYPE on z/OS. type: str choices: - - library - - pds - - pdse - - large - - basic - - seq - - rrds - - esds - - lds - - ksds + - LIBRARY + - PDS + - PDSE + - LARGE + - BASIC + - SEQ + - RRDS + - ESDS + - LDS + - KSDS disposition: description: - I(disposition) indicates the status of a data set. @@ -795,12 +795,12 @@ using I(space_primary) and I(space_secondary). type: str choices: - - trk - - cyl - - b - - k - - m - - g + - TRK + - CYL + - B + - K + - M + - G space_primary: description: - The primary amount of space to allocate for a new data set. @@ -946,11 +946,11 @@ - The format and characteristics of the records for new data set. type: str choices: - - u - - vb - - vba - - fb - - fba + - U + - VB + - VBA + - FB + - FBA return_content: description: - Determines how content should be returned to the user. 
@@ -1124,11 +1124,11 @@ a UNIX file would normally be treated as a stream of bytes. type: str choices: - - u - - vb - - vba - - fb - - fba + - U + - VB + - VBA + - FB + - FBA return_content: description: - Determines how content should be returned to the user. @@ -1300,13 +1300,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: seq + type: SEQ space_primary: 5 space_secondary: 1 - space_type: m + space_type: M volumes: - "000000" - record_format: fb + record_format: FB return_content: type: text - dd_input: @@ -1324,13 +1324,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: seq + type: SEQ space_primary: 5 space_secondary: 1 - space_type: m + space_type: M volumes: - "000000" - record_format: fb + record_format: FB return_content: type: text - dd_input: @@ -1369,13 +1369,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: seq + type: SEQ space_primary: 5 space_secondary: 1 - space_type: m + space_type: M volumes: - "000000" - record_format: fb + record_format: FB return_content: type: text - dd_input: @@ -1398,15 +1398,15 @@ disposition: new replace: yes backup: yes - type: seq + type: SEQ space_primary: 5 space_secondary: 1 - space_type: m + space_type: M volumes: - "000000" - "111111" - "SCR002" - record_format: fb + record_format: FB return_content: type: text - dd_input: @@ -1628,10 +1628,6 @@ backups = [] -# Use of global tmphlq to keep coherent classes definitions -g_tmphlq = "" - - def run_module(): """Executes all module-related functions. 
@@ -1651,7 +1647,7 @@ def run_module(): type="str", choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], ), - space_type=dict(type="str", choices=["trk", "cyl", "b", "k", "m", "g"]), + space_type=dict(type="str", choices=["TRK", "CYL", "B", "K", "M", "G"]), space_primary=dict(type="int"), space_secondary=dict(type="int"), volumes=dict(type="raw"), @@ -1664,16 +1660,16 @@ def run_module(): type=dict( type="str", choices=[ - "library", - "pds", - "pdse", - "seq", - "basic", - "large", - "ksds", - "rrds", - "lds", - "esds", + "LIBRARY", + "PDS", + "PDSE", + "SEQ", + "BASIC", + "LARGE", + "KSDS", + "RRDS", + "LDS", + "ESDS", ], ), encryption_key_1=dict( @@ -1695,7 +1691,7 @@ def run_module(): key_length=dict(type="int", no_log=False), key_offset=dict(type="int", no_log=False), record_length=dict(type="int"), - record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), + record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), return_content=dict( type="dict", options=dict( @@ -1770,7 +1766,7 @@ def run_module(): ), block_size=dict(type="int"), record_length=dict(type="int"), - record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), + record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), return_content=dict( type="dict", options=dict( @@ -1839,8 +1835,7 @@ def run_module(): if not module.check_mode: try: parms = parse_and_validate_args(module.params) - global g_tmphlq - g_tmphlq = parms.get("tmp_hlq") + tmphlq = parms.get("tmp_hlq") dd_statements = build_dd_statements(parms) program = parms.get("program_name") program_parm = parms.get("parm") @@ -1852,6 +1847,7 @@ def run_module(): dd_statements=dd_statements, authorized=authorized, verbose=verbose, + tmp_hlq=tmphlq, ) if program_response.rc != 0 and program_response.stderr: raise ZOSRawError( @@ -1894,7 +1890,7 @@ def parse_and_validate_args(params): type="str", choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], ), 
- space_type=dict(type="str", choices=["trk", "cyl", "b", "k", "m", "g"]), + space_type=dict(type="str", choices=["TRK", "CYL", "B", "K", "M", "G"]), space_primary=dict(type="int"), space_secondary=dict(type="int"), volumes=dict(type=volumes), @@ -1907,16 +1903,16 @@ def parse_and_validate_args(params): type=dict( type="str", choices=[ - "library", - "pds", - "pdse", - "seq", - "basic", - "large", - "ksds", - "rrds", - "lds", - "esds", + "LIBRARY", + "PDS", + "PDSE", + "SEQ", + "BASIC", + "LARGE", + "KSDS", + "RRDS", + "LDS", + "ESDS", ], ), encryption_key_1=dict( @@ -1940,7 +1936,7 @@ def parse_and_validate_args(params): type=key_offset, default=key_offset_default, dependencies=["type"] ), record_length=dict(type="int"), - record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), + record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), return_content=dict( type="dict", options=dict( @@ -1996,7 +1992,7 @@ def parse_and_validate_args(params): ), block_size=dict(type="int"), record_length=dict(type="int"), - record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), + record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), return_content=dict( type="dict", options=dict( @@ -2088,8 +2084,8 @@ def key_length(contents, dependencies): """ if contents is None: return contents - if contents is not None and dependencies.get("type") != "ksds": - raise ValueError('key_length is only valid when "type=ksds".') + if contents is not None and dependencies.get("type") != "KSDS": + raise ValueError('key_length is only valid when "type=KSDS".') if not re.fullmatch(r"[0-9]+", str(contents)): raise ValueError( 'Invalid argument "{0}" for type "key_length".'.format(str(contents)) @@ -2109,8 +2105,8 @@ def key_offset(contents, dependencies): """ if contents is None: return contents - if contents is not None and dependencies.get("type") != "ksds": - raise ValueError('key_offset is only valid when "type=ksds".') + if contents 
is not None and dependencies.get("type") != "KSDS": + raise ValueError('key_offset is only valid when "type=KSDS".') if not re.fullmatch(r"[0-9]+", str(contents)): raise ValueError( @@ -2131,9 +2127,9 @@ def key_length_default(contents, dependencies): """ KEY_LENGTH = 5 length = None - if contents is None and dependencies.get("type") == "ksds": + if contents is None and dependencies.get("type") == "KSDS": length = KEY_LENGTH - elif dependencies.get("type") == "ksds": + elif dependencies.get("type") == "KSDS": length = contents return length @@ -2149,9 +2145,9 @@ def key_offset_default(contents, dependencies): """ KEY_OFFSET = 0 offset = None - if contents is None and dependencies.get("type") == "ksds": + if contents is None and dependencies.get("type") == "KSDS": offset = KEY_OFFSET - elif dependencies.get("type") == "ksds": + elif dependencies.get("type") == "KSDS": offset = contents return offset @@ -2408,7 +2404,7 @@ def build_dd_statements(parms): dd_statements = [] for dd in parms.get("dds"): dd_name = get_dd_name(dd) - dd = set_extra_attributes_in_dd(dd) + dd = set_extra_attributes_in_dd(dd, parms) data_definition = build_data_definition(dd) if data_definition is None: raise ValueError("No valid data definition found.") @@ -2444,26 +2440,27 @@ def get_dd_name(dd): return dd_name -def set_extra_attributes_in_dd(dd): +def set_extra_attributes_in_dd(dd, parms): """ - Set any extra attributes in dds like in global g_tmphlq. + Set any extra attributes in dds like in global tmp_hlq. Args: dd (dict): A single DD parm as specified in module parms. Returns: dd (dict): A single DD parm as specified in module parms. 
""" + tmphlq = parms.get("tmp_hlq") if dd.get("dd_data_set"): - dd.get("dd_data_set")["tmphlq"] = g_tmphlq + dd.get("dd_data_set")["tmphlq"] = tmphlq elif dd.get("dd_input"): - dd.get("dd_input")["tmphlq"] = g_tmphlq + dd.get("dd_input")["tmphlq"] = tmphlq elif dd.get("dd_output"): - dd.get("dd_output")["tmphlq"] = g_tmphlq + dd.get("dd_output")["tmphlq"] = tmphlq elif dd.get("dd_vio"): - dd.get("dd_vio")["tmphlq"] = g_tmphlq + dd.get("dd_vio")["tmphlq"] = tmphlq elif dd.get("dd_concat"): for single_dd in dd.get("dd_concat").get("dds", []): - set_extra_attributes_in_dd(single_dd) + set_extra_attributes_in_dd(single_dd, parms) return dd @@ -2572,6 +2569,7 @@ def __init__( """ self.backup = None self.return_content = ReturnContent(**(return_content or {})) + self.tmphlq = tmphlq primary_unit = space_type secondary_unit = space_type key_label1 = None @@ -2698,7 +2696,6 @@ def __init__( ) -# TODO: potentially extend the available parameters to end user class RawInputDefinition(InputDefinition): """Wrapper around InputDefinition to contain information about desired return contents. @@ -2707,7 +2704,7 @@ class RawInputDefinition(InputDefinition): InputDefinition (InputDefinition): Input DD data type to be used in a DDStatement. """ - def __init__(self, content="", return_content=None, **kwargs): + def __init__(self, content="", return_content=None, tmphlq="", **kwargs): """Initialize RawInputDefinition Args: @@ -2715,7 +2712,7 @@ def __init__(self, content="", return_content=None, **kwargs): return_content (dict, optional): Determines how content should be returned to the user. Defaults to {}. """ self.return_content = ReturnContent(**(return_content or {})) - super().__init__(content=content) + super().__init__(content=content, tmphlq=tmphlq) class RawOutputDefinition(OutputDefinition): @@ -2726,7 +2723,7 @@ class RawOutputDefinition(OutputDefinition): OutputDefinition (OutputDefinition): Output DD data type to be used in a DDStatement. 
""" - def __init__(self, return_content=None, **kwargs): + def __init__(self, return_content=None, tmphlq="", **kwargs): """Initialize RawOutputDefinition Args: @@ -2734,7 +2731,7 @@ def __init__(self, return_content=None, **kwargs): return_content (dict, optional): Determines how content should be returned to the user. Defaults to {}. """ self.return_content = ReturnContent(**(return_content or {})) - super().__init__() + super().__init__(tmphlq=tmphlq) class ReturnContent(object): @@ -2761,28 +2758,6 @@ def __init__(self, type=None, src_encoding=None, response_encoding=None): self.response_encoding = response_encoding -def to_bytes(size, unit): - """Convert sizes of various units to bytes. - - Args: - size (int): The size to convert. - unit (str): The unit of size. - - Returns: - int: The size converted to bytes. - """ - num_bytes = 0 - if unit == "b": - num_bytes = size - elif unit == "k": - num_bytes = size * 1024 - elif unit == "m": - num_bytes = size * 1048576 - elif unit == "g": - num_bytes = size * 1073741824 - return num_bytes - - def rename_parms(parms, name_map): """Rename parms based on a provided dictionary. @@ -2839,7 +2814,7 @@ def data_set_exists(name, volumes=None): def run_zos_program( - program, parm="", dd_statements=None, authorized=False, verbose=False + program, parm="", dd_statements=None, authorized=False, verbose=False, tmp_hlq=None ): """Run a program on z/OS. @@ -2848,6 +2823,7 @@ def run_zos_program( parm (str, optional): Additional argument string if required. Defaults to "". dd_statements (list[DDStatement], optional): DD statements to allocate for the program. Defaults to []. authorized (bool, optional): Determines if program will execute as an authorized user. Defaults to False. + tmp_hlq (str, optional): Arguments overwrite variable tmp_hlq Returns: MVSCmdResponse: Holds the response information for program execution. 
@@ -2857,11 +2833,11 @@ def run_zos_program( response = None if authorized: response = MVSCmd.execute_authorized( - pgm=program, parm=parm, dds=dd_statements, verbose=verbose + pgm=program, parm=parm, dds=dd_statements, verbose=verbose, tmp_hlq=tmp_hlq ) else: response = MVSCmd.execute( - pgm=program, parm=parm, dds=dd_statements, verbose=verbose + pgm=program, parm=parm, dds=dd_statements, verbose=verbose, tmp_hlq=tmp_hlq ) return response diff --git a/tests/functional/modules/test_zos_mvs_raw_func.py b/tests/functional/modules/test_zos_mvs_raw_func.py index fd20a6a92..ca5b6384d 100644 --- a/tests/functional/modules/test_zos_mvs_raw_func.py +++ b/tests/functional/modules/test_zos_mvs_raw_func.py @@ -62,7 +62,7 @@ def test_disposition_new(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", return_content=dict(type="text"), ), ), @@ -86,7 +86,7 @@ def test_dispositions_for_existing_data_set(ansible_zos_module, disposition): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - name=default_data_set, type="seq", state="present", replace=True + name=default_data_set, type="SEQ", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -118,7 +118,7 @@ def test_list_cat_for_existing_data_set_with_tmp_hlq_option(ansible_zos_module, default_volume = volumes.get_available_vol() default_data_set = get_tmp_ds_name()[:25] hosts.all.zos_data_set( - name=default_data_set, type="seq", state="present", replace=True + name=default_data_set, type="SEQ", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -133,12 +133,12 @@ def test_list_cat_for_existing_data_set_with_tmp_hlq_option(ansible_zos_module, return_content=dict(type="text"), replace=True, backup=True, - type="seq", + type="SEQ", space_primary=5, space_secondary=1, - space_type="m", + space_type="M", volumes=default_volume, - record_format="fb" + 
record_format="FB" ), ), dict(dd_input=dict(dd_name=SYSIN_DD, content=IDCAMS_STDIN)), @@ -172,7 +172,7 @@ def test_new_disposition_for_data_set_members(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=DEFAULT_DATA_SET_WITH_MEMBER, disposition="new", - type="pds", + type="PDS", directory_blocks=15, return_content=dict(type="text"), ), @@ -197,7 +197,7 @@ def test_dispositions_for_existing_data_set_members(ansible_zos_module, disposit default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_WITH_MEMBER = default_data_set + '(MEM)' hosts.all.zos_data_set( - name=default_data_set, type="pds", state="present", replace=True + name=default_data_set, type="PDS", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -234,7 +234,7 @@ def test_normal_dispositions_data_set(ansible_zos_module, normal_disposition, ch default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="seq", + type="SEQ", state="present", replace=True, volumes=[volume_1], @@ -267,11 +267,11 @@ def test_normal_dispositions_data_set(ansible_zos_module, normal_disposition, ch @pytest.mark.parametrize( "space_type,primary,secondary,expected", [ - ("trk", 3, 1, 169992), - ("cyl", 3, 1, 2549880), - ("b", 3, 1, 56664), - ("k", 3, 1, 56664), - ("m", 3, 1, 2889864), + ("TRK", 3, 1, 169992), + ("CYL", 3, 1, 2549880), + ("B", 3, 1, 56664), + ("K", 3, 1, 56664), + ("M", 3, 1, 3003192), ], ) def test_space_types(ansible_zos_module, space_type, primary, secondary, expected): @@ -288,7 +288,7 @@ def test_space_types(ansible_zos_module, space_type, primary, secondary, expecte dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", space_primary=primary, space_secondary=secondary, space_type=space_type, @@ -315,7 +315,7 @@ def test_space_types(ansible_zos_module, space_type, primary, secondary, expecte @pytest.mark.parametrize( "data_set_type", - ["pds", "pdse", "large", "basic", "seq"], + ["PDS", 
"PDSE", "LARGE", "BASIC", "SEQ"], ) def test_data_set_types_non_vsam(ansible_zos_module, data_set_type, volumes_on_systems): try: @@ -351,7 +351,7 @@ def test_data_set_types_non_vsam(ansible_zos_module, data_set_type, volumes_on_s @pytest.mark.parametrize( "data_set_type", - ["ksds", "rrds", "lds", "esds"], + ["KSDS", "RRDS", "LDS", "ESDS"], ) def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_systems): try: @@ -374,7 +374,7 @@ def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_syste volumes=[volume_1], ), ) - if data_set_type != "ksds" + if data_set_type != "KSDS" else dict( dd_data_set=dict( dd_name=SYSPRINT_DD, @@ -393,14 +393,14 @@ def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_syste # * because that means data set exists and is VSAM so we can't read it results = hosts.all.command(cmd="head \"//'{0}'\"".format(default_data_set)) for result in results.contacted.values(): - assert "EDC5041I" in result.get("stderr", "") + assert "EDC5041I" or "EDC5049I" in result.get("stderr", "") finally: hosts.all.zos_data_set(name=default_data_set, state="absent") @pytest.mark.parametrize( "record_format", - ["u", "vb", "vba", "fb", "fba"], + ["U", "VB", "VBA", "FB", "FBA"], ) def test_record_formats(ansible_zos_module, record_format, volumes_on_systems): try: @@ -453,7 +453,7 @@ def test_return_content_type(ansible_zos_module, return_content_type, expected, default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="seq", + type="SEQ", state="present", replace=True, volumes=[volume_1], @@ -505,7 +505,7 @@ def test_return_text_content_encodings( default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="seq", + type="SEQ", state="present", replace=True, volumes=[volume_1], @@ -544,7 +544,7 @@ def test_reuse_existing_data_set(ansible_zos_module): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() 
hosts.all.zos_data_set( - name=default_data_set, type="seq", state="present", replace=True + name=default_data_set, type="SEQ", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="IDCAMS", @@ -555,7 +555,7 @@ def test_reuse_existing_data_set(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", reuse=True, return_content=dict(type="text"), ), @@ -577,7 +577,7 @@ def test_replace_existing_data_set(ansible_zos_module): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - name=default_data_set, type="seq", state="present", replace=True + name=default_data_set, type="SEQ", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="IDCAMS", @@ -588,7 +588,7 @@ def test_replace_existing_data_set(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", replace=True, return_content=dict(type="text"), ), @@ -619,7 +619,7 @@ def test_replace_existing_data_set_make_backup(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", replace=True, return_content=dict(type="text"), ), @@ -636,7 +636,7 @@ def test_replace_existing_data_set_make_backup(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", replace=True, backup=True, return_content=dict(type="text"), @@ -687,7 +687,7 @@ def test_input_empty(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", return_content=dict(type="text"), ), ), @@ -719,7 +719,7 @@ def test_input_large(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", return_content=dict(type="text"), ), ), @@ -752,7 +752,7 @@ def test_input_provided_as_list(ansible_zos_module): 
dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", return_content=dict(type="text"), ), ), @@ -792,7 +792,7 @@ def test_input_return_content_types(ansible_zos_module, return_content_type, exp dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", ), ), dict( @@ -844,7 +844,7 @@ def test_input_return_text_content_encodings( dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", ), ), dict( @@ -1164,7 +1164,7 @@ def test_file_record_length(ansible_zos_module, record_length): @pytest.mark.parametrize( "record_format", - ["u", "vb", "vba", "fb", "fba"], + ["U", "VB", "VBA", "FB", "FBA"], ) def test_file_record_format(ansible_zos_module, record_format): try: @@ -1353,7 +1353,7 @@ def test_concatenation_with_data_set_dd_and_response(ansible_zos_module): dd_data_set=dict( data_set_name=default_data_set, disposition="new", - type="seq", + type="SEQ", return_content=dict(type="text"), ) ), @@ -1361,7 +1361,7 @@ def test_concatenation_with_data_set_dd_and_response(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="seq", + type="SEQ", ) ), ], @@ -1391,8 +1391,8 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu hosts = ansible_zos_module default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_2 = get_tmp_ds_name() - hosts.all.zos_data_set(name=default_data_set, state="present", type="seq") - hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="present", type="seq") + hosts.all.zos_data_set(name=default_data_set, state="present", type="SEQ") + hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="present", type="SEQ") results = hosts.all.zos_mvs_raw( program_name="idcams", auth=True, @@ -1405,7 +1405,7 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu dd_data_set=dict( data_set_name=default_data_set, disposition="new", - 
type="seq", + type="SEQ", replace=True, backup=True, return_content=dict(type="text"), @@ -1415,7 +1415,7 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="seq", + type="SEQ", replace=True, backup=True, ) @@ -1462,7 +1462,7 @@ def test_concatenation_with_data_set_member(ansible_zos_module): default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_2 = get_tmp_ds_name() DEFAULT_DATA_SET_WITH_MEMBER = default_data_set + '(MEM)' - hosts.all.zos_data_set(name=default_data_set, state="present", type="pds") + hosts.all.zos_data_set(name=default_data_set, state="present", type="PDS") hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="absent") results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -1482,7 +1482,7 @@ def test_concatenation_with_data_set_member(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="seq", + type="SEQ", ) ), ], @@ -1538,7 +1538,7 @@ def test_concatenation_with_unix_dd_and_response_datasets(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="seq", + type="SEQ", ) ), ], @@ -1766,7 +1766,7 @@ def test_concatenation_all_dd_types(ansible_zos_module, dds, input_pos, input_co try: hosts = ansible_zos_module default_data_set = "ANSIBLE.USER.PRIVATE.TEST" - hosts.all.zos_data_set(name=default_data_set, state="present", type="seq") + hosts.all.zos_data_set(name=default_data_set, state="present", type="SEQ") hosts.all.file(path=DEFAULT_PATH, state="directory") hosts.all.file(path=DEFAULT_PATH_WITH_FILE, state="absent") results = hosts.all.zos_mvs_raw(program_name="idcams", auth=True, dds=dds) diff --git a/tests/unit/test_zos_mvs_raw_unit.py b/tests/unit/test_zos_mvs_raw_unit.py index e50734756..f528412da 100644 --- a/tests/unit/test_zos_mvs_raw_unit.py +++ b/tests/unit/test_zos_mvs_raw_unit.py @@ -59,7 +59,7 @@ def run_command(self, *args, 
**kwargs): "new", "keep", "keep", - "cyl", + "CYL", 5, 1, "smsclas1", @@ -67,17 +67,17 @@ def run_command(self, *args, **kwargs): "smsclas1", 80, "SOMEKEYLAB100", - "library", + "LIBRARY", {"label": "keyforme", "encoding": "h"}, {"label": "keyforme2", "encoding": "h"}, - "u", + "U", ), ( "data.set.name(mem1)", "shr", "delete", "keep", - "trk", + "TRK", "5", 1, "smsclas1", @@ -85,17 +85,17 @@ def run_command(self, *args, **kwargs): "smsclas3", 120, "somekeylab1", - "basic", + "BASIC", {"label": "keyforme", "encoding": "l"}, {"label": "keyforme2", "encoding": "h"}, - "fb", + "FB", ), ( "DATA.NAME.HERE.NOW", "old", "catalog", "uncatalog", - "b", + "B", 55, "100", "SMSCLASS", @@ -103,17 +103,17 @@ def run_command(self, *args, **kwargs): "smscD@s3", 120, "keyfor342fdsme", - "large", + "LARGE", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "fba", + "FBA", ), ( "DAT@.now", "mod", "delete", "uncatalog", - "g", + "G", 1, "9", "SMSCLASS", @@ -121,17 +121,17 @@ def run_command(self, *args, **kwargs): "", 120, "keyfor342fdsme", - "pdse", + "PDSE", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "vb", + "VB", ), ( "DAT$.now", "new", "delete", "keep", - "m", + "M", 1, 9, "SMSCLASS", @@ -139,10 +139,10 @@ def run_command(self, *args, **kwargs): "", 0, "", - "lds", + "LDS", {"label": "keyforME", "encoding": "l"}, {"label": "keyyyyy343asdfasfsdfa", "encoding": "l"}, - "vba", + "VBA", ), ], ) @@ -237,7 +237,7 @@ def test_argument_parsing_data_set( "delete", 0, 100, - "fb", + "FB", "record", "r", ["ocreat", "oappend", "onoctty"], @@ -248,14 +248,14 @@ def test_argument_parsing_data_set( "delete", 200, "100", - "fba", + "FBA", "record", "w", ["oappend", "osync"], ), - ("/u/OEUSR01", "keep", "delete", 0, 100, "vb", "binary", "rw", ["ononblock"]), - ("/u/testmeee", "keep", "delete", 0, 100, "vba", "record", "read_only", []), - ("/u/hellow/d/or4ld", "keep", "keep", 0, 100, "u", "text", "write_only", []), + ("/u/OEUSR01", 
"keep", "delete", 0, 100, "VB", "binary", "rw", ["ononblock"]), + ("/u/testmeee", "keep", "delete", 0, 100, "VBA", "record", "read_only", []), + ("/u/hellow/d/or4ld", "keep", "keep", 0, 100, "U", "text", "write_only", []), ], ) def test_argument_parsing_unix( @@ -338,7 +338,7 @@ def test_argument_parsing_unix( "old", "keep", "keep", - "cyl", + "CYL", 5, 1, "smsclas1", @@ -346,17 +346,17 @@ def test_argument_parsing_unix( "smsclas1", 80, "SOMEKEYLAB100", - "library", + "LIBRARY", {"label": "keyforme", "encoding": "h"}, {"label": "keyforme2", "encoding": "h"}, - "u", + "U", ), ( "data.set.name(mem1waytoolong)", "excl", "delete", "keep", - "trk", + "TRK", "5", 1, "smsclas1", @@ -364,10 +364,10 @@ def test_argument_parsing_unix( "smsclas3", 120, "somekeylab1", - "basic", + "BASIC", {"label": "keyforme", "encoding": "l"}, {"label": "keyforme2", "encoding": "h"}, - "fb", + "FB", ), ( "DATA.NAME.HERE.NOW", @@ -382,17 +382,17 @@ def test_argument_parsing_unix( "smscD@s3", 120, "keyfor342fdsme", - "large", + "LARGE", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "fba", + "FBA", ), ( "DAT@.now", "mod", "delete", "uncatalog", - "g", + "G", 1, "9", "SMSCLASSsss", @@ -400,17 +400,17 @@ def test_argument_parsing_unix( "", 120, "keyfor342fdsme", - "pdse", + "PDSE", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "vb", + "VB", ), ( "DAT$.now", "new", "delete", "meep", - "m", + "M", 1, 9, "SMSCLASS", @@ -418,10 +418,10 @@ def test_argument_parsing_unix( "", 0, "", - "ksdss", + "KSDSS", {"label": "keyforME", "encoding": "l"}, {"label": "keyyyyy343asdfasfsdfa", "encoding": "l"}, - "vba", + "VBA", ), ], ) @@ -525,7 +525,7 @@ def test_argument_parsing_data_set_failure_path( "delete", 200, "100", - "fba", + "FBA", "record", "w", ["append", "osync"], @@ -537,12 +537,12 @@ def test_argument_parsing_data_set_failure_path( "delete", 0, 100, - "vba", + "VBA", "record", "read_only", ["hello"], ), - ("/u/hellow/d/or4ld", "meep", 
"keep", 0, 100, "u", "text", None, []), + ("/u/hellow/d/or4ld", "meep", "keep", 0, 100, "U", "text", None, []), ], ) def test_argument_parsing_unix_failure_path( @@ -620,7 +620,7 @@ def test_ksds_defaults( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "ksds", + "type": "KSDS", } }, ], @@ -663,7 +663,7 @@ def test_ksds_exception_key_length( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "esds", + "type": "ESDS", "key_length": 5, } }, @@ -693,7 +693,7 @@ def test_ksds_exception_key_offset( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "esds", + "type": "ESDS", "key_offset": 5, } }, From 2697e32b474ec33832e2977c3e73246904c3e5ad Mon Sep 17 00:00:00 2001 From: Rich Parker Date: Thu, 4 Apr 2024 12:23:10 -0400 Subject: [PATCH 12/28] Removed a test condition that obscured duration (#1364) * removed a function in a test that would obscure if null durations are coming back it appears this issue is resolved. * added changelog --------- Co-authored-by: Demetri --- changelogs/fragments/1032-clean-job_submit-test.yml | 3 +++ tests/functional/modules/test_zos_job_submit_func.py | 6 ++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/1032-clean-job_submit-test.yml diff --git a/changelogs/fragments/1032-clean-job_submit-test.yml b/changelogs/fragments/1032-clean-job_submit-test.yml new file mode 100644 index 000000000..bb4248aec --- /dev/null +++ b/changelogs/fragments/1032-clean-job_submit-test.yml @@ -0,0 +1,3 @@ +trivial: + - test_zos_job_submit_func.py - Removed test setting that was covering a missing duration value. + (https://github.com/ansible-collections/ibm_zos_core/pull/1364). 
diff --git a/tests/functional/modules/test_zos_job_submit_func.py b/tests/functional/modules/test_zos_job_submit_func.py index bae4dbb36..c148b6223 100644 --- a/tests/functional/modules/test_zos_job_submit_func.py +++ b/tests/functional/modules/test_zos_job_submit_func.py @@ -695,10 +695,8 @@ def test_job_submit_max_rc(ansible_zos_module, args): #Expecting: - "The job return code 8 was non-zero in the job output, this job has failed" # - Consider using module zos_job_query to poll for a long running job or # increase option \\'wait_times_s` to a value greater than 10.", - if result.get('duration'): - duration = result.get('duration') - else: - duration = 0 + + duration = result.get('duration') if duration >= args["wait_time_s"]: re.search(r'long running job', repr(result.get("msg"))) From aeafa82cb02c19068f8f704b093a6b07dec15392 Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Fri, 5 Apr 2024 11:20:17 -0600 Subject: [PATCH 13/28] Updated actions to only run when PR is not draft (#1412) * Updated actions to only run when PR is not draft * Add test * Modified draft condition * Update zos_apf.py * Modified workflows * test * test --- .github/workflows/ac-ansible-test-sanity.yml | 2 ++ .github/workflows/ac-bandit.yml | 6 +++- .github/workflows/ac-galaxy-importer.yml | 34 +++++++++++++++++++- .github/workflows/ac_changelog.yml | 2 ++ 4 files changed, 42 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ac-ansible-test-sanity.yml b/.github/workflows/ac-ansible-test-sanity.yml index 1354195a5..d0c4b58d2 100644 --- a/.github/workflows/ac-ansible-test-sanity.yml +++ b/.github/workflows/ac-ansible-test-sanity.yml @@ -2,6 +2,7 @@ name: AC Ansible sanity on: pull_request: + types: [opened, synchronize, reopened, ready_for_review] branches: - dev - staging* @@ -38,6 +39,7 @@ on: jobs: ansible-sanity: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest env: branch: ${{ github.event.pull_request.head.ref }} diff --git 
a/.github/workflows/ac-bandit.yml b/.github/workflows/ac-bandit.yml index 288fb92b1..1b93e40a4 100644 --- a/.github/workflows/ac-bandit.yml +++ b/.github/workflows/ac-bandit.yml @@ -2,12 +2,16 @@ name: AC Bandit on: pull_request: + types: [opened, synchronize, reopened, ready_for_review] branches: - dev - staging* - + paths: + - 'plugins/**' + jobs: bandit: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ac-galaxy-importer.yml b/.github/workflows/ac-galaxy-importer.yml index 271f01c22..563d37ada 100644 --- a/.github/workflows/ac-galaxy-importer.yml +++ b/.github/workflows/ac-galaxy-importer.yml @@ -2,12 +2,44 @@ name: AC Galaxy Importer on: pull_request: + types: [opened, synchronize, reopened, ready_for_review] branches: - dev - staging* - + paths-ignore: + - '**.tar.gz' + - 'pycache/**' + - '.ansible-lint' + - 'cache/**' + - '.DS_Store' + - '.git/**' + - '.github/**' + - '.gitignore' + - '.python-version' + - '.pytest_cache/**' + - '.vscode/**' + - 'Jenkinsfile' + - 'ac' + - 'ansible.cfg' + - 'changelogs/**' + - 'collections/**' + - 'docs/**' + - 'scripts/**' + - 'test_config.yml' + - 'tests/*.ini' + - 'tests/*.py' + - 'tests/.pytest_cache' + - 'tests/pycache' + - 'tests/functional' + - 'tests/helpers' + - 'tests/requirements.txt' + - 'tests/unit' + - 'tests/sanity/ignore-*' + - 'venv*' + jobs: galaxy-importer: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ac_changelog.yml b/.github/workflows/ac_changelog.yml index 523e207b9..e3b3f3cc4 100644 --- a/.github/workflows/ac_changelog.yml +++ b/.github/workflows/ac_changelog.yml @@ -2,6 +2,7 @@ name: AC Changelog Lint on: pull_request: + types: [opened, synchronize, reopened, ready_for_review] paths: - 'changelogs/fragments/*' branches: @@ -10,6 +11,7 @@ on: jobs: lint: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: From d8b87a42117c99144bedd93e4f0b5f7964fc112c Mon Sep 
17 00:00:00 2001 From: IsaacVRey Date: Fri, 5 Apr 2024 11:20:49 -0600 Subject: [PATCH 14/28] [Documentation][encode] Add and standarize docstring to encode.py (#1322) * Add and estandarize docstring to encode.py * Create changelog fragment * Modified the google style to numpy * Update changelog fragment * Standarize numpy style * Update encode.py added newline to address pep8 error * Fixed some dcostrings * Modified docstrings --------- Co-authored-by: Rich Parker Co-authored-by: Fernando Flores --- .../1322-update-docstring-encode.yml | 3 + plugins/module_utils/encode.py | 357 +++++++++++++----- 2 files changed, 269 insertions(+), 91 deletions(-) create mode 100644 changelogs/fragments/1322-update-docstring-encode.yml diff --git a/changelogs/fragments/1322-update-docstring-encode.yml b/changelogs/fragments/1322-update-docstring-encode.yml new file mode 100644 index 000000000..dd5eb5389 --- /dev/null +++ b/changelogs/fragments/1322-update-docstring-encode.yml @@ -0,0 +1,3 @@ +trivial: + - encode - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1322). \ No newline at end of file diff --git a/plugins/module_utils/encode.py b/plugins/module_utils/encode.py index 195802583..f68a8ab77 100644 --- a/plugins/module_utils/encode.py +++ b/plugins/module_utils/encode.py @@ -56,10 +56,12 @@ class Defaults: @staticmethod def get_default_system_charset(): - """Get the default encoding of the current machine + """Get the default encoding of the current machine. - Returns: - str -- The encoding of the current machine + Returns + ------- + str + The encoding of the current machine. 
""" system_charset = locale.getdefaultlocale()[1] if system_charset is None: @@ -80,15 +82,24 @@ def get_default_system_charset(): class EncodeUtils(object): def __init__(self): """Call the coded character set conversion utility iconv - to convert a USS file from one coded character set to another - - Arguments: - module {AnsibleModule} -- The AnsibleModule object from currently running module + to convert a USS file from one coded character set to another. """ self.module = AnsibleModuleHelper(argument_spec={}) self.tmphlq = None def _validate_data_set_name(self, ds): + """Validate data set name using BetterArgParser. + + Parameters + ---------- + ds : str + The source data set name. + + Returns + ------- + str + Parsed data set name. + """ arg_defs = dict( ds=dict(arg_type="data_set"), ) @@ -97,6 +108,18 @@ def _validate_data_set_name(self, ds): return parsed_args.get("ds") def _validate_path(self, path): + """Validate path using BetterArgParser. + + Parameters + ---------- + path : str + The path. + + Returns + ------- + str + Parsed path. + """ arg_defs = dict( path=dict(arg_type="path"), ) @@ -105,6 +128,18 @@ def _validate_path(self, path): return parsed_args.get("path") def _validate_data_set_or_path(self, path): + """Validate data set or path using BetterArgParser. + + Parameters + ---------- + path : str + The path. + + Returns + ------- + str + Parsed path. + """ arg_defs = dict( path=dict(arg_type="data_set_or_path"), ) @@ -113,6 +148,18 @@ def _validate_data_set_or_path(self, path): return parsed_args.get("path") def _validate_encoding(self, encoding): + """Validate encoding using BetterArgParser. + + Parameters + --------- + encoding : str + The encoding. + + Returns + ------- + str + Parsed encoding. 
+ """ arg_defs = dict( encoding=dict(arg_type="encoding"), ) @@ -122,16 +169,24 @@ def _validate_encoding(self, encoding): def listdsi_data_set(self, ds): """Invoke IDCAMS LISTCAT command to get the record length and space used - to estimate the space used by the VSAM data set - - Arguments: - ds: {str} -- The VSAM data set to be checked. - - Raises: - EncodeError: When any exception is raised during the conversion. - Returns: - int -- The maximum record length of the VSAM data set. - int -- The space used by the VSAM data set(KB). + to estimate the space used by the VSAM data set. + + Parameters + ---------- + ds : str + The VSAM data set to be checked. + + Returns + ------- + int + The maximum record length of the VSAM data set. + int + The space used by the VSAM data set(KB). + + Raises + ------ + EncodeError + When any exception is raised during the conversion. """ ds = self._validate_data_set_name(ds) reclen = 80 @@ -179,17 +234,24 @@ def listdsi_data_set(self, ds): return reclen, space_u def temp_data_set(self, reclen, space_u): - """Creates a temporary data set with the given record length and size - - Arguments: - size {str} -- The size of the data set - lrecl {int} -- The record length of the data set - - Returns: - str -- Name of the allocated data set - - Raises: - ZOAUException: When any exception is raised during the data set allocation. + """Creates a temporary data set with the given record length and size. + + Parameters + ---------- + lrecl : int + The record length of the data set. + space_u : str + The size of the data set. + + Returns + ------- + str + Name of the allocated data set. + + Raises + ------ + ZOAUException + When any exception is raised during the data set allocation. DatasetVerificationError: When the data set creation could not be verified. 
""" size = str(space_u * 2) + "K" @@ -208,12 +270,17 @@ def temp_data_set(self, reclen, space_u): return temporary_data_set.name def get_codeset(self): - """Get the list of supported encodings from the USS command 'iconv -l' + """Get the list of supported encodings from the USS command 'iconv -l'. + + Returns + ------- + Union[str] + The code set list supported in current USS platform. - Raises: - EncodeError: When any exception is raised during the conversion - Returns: - list -- The code set list supported in current USS platform + Raises + ------ + EncodeError + When any exception is raised during the conversion. """ code_set = None iconv_list_cmd = ["iconv", "-l"] @@ -226,17 +293,26 @@ def get_codeset(self): return code_set def string_convert_encoding(self, src, from_encoding, to_encoding): - """Convert the encoding of the data when the src is a normal string - - Arguments: - from_code_set: {str} -- The source code set of the string - to_code_set: {str} -- The destination code set for the string - src: {str} -- The input string content - - Raises: - EncodeError: When any exception is raised during the conversion - Returns: - str -- The string content after the encoding + """Convert the encoding of the data when the src is a normal string. + + Parameters + ---------- + src : str + The input string content. + from_encoding : str + The source code set of the string. + to_encoding : str + The destination code set for the string. + + Returns + ------- + str + The string content after the encoding. + + Raises + ------ + EncodeError + When any exception is raised during the conversion. 
""" from_encoding = self._validate_encoding(from_encoding) to_encoding = self._validate_encoding(to_encoding) @@ -249,19 +325,30 @@ def string_convert_encoding(self, src, from_encoding, to_encoding): return out def uss_convert_encoding(self, src, dest, from_code, to_code): - """Convert the encoding of the data in a USS file - - Arguments: - from_code: {str} -- The source code set of the input file - to_code: {str} -- The destination code set for the output file - src: {str} -- The input file name, it should be a uss file - dest: {str} -- The output file name, it should be a uss file - - Raises: - EncodeError: When any exception is raised during the conversion. - MoveFileError: When any exception is raised during moving files. - Returns: - boolean -- Indicate whether the conversion is successful or not. + """Convert the encoding of the data in a USS file. + + Parameters + ---------- + src : str + The input file name, it should be a uss file. + dest : str + The output file name, it should be a uss file. + from_code : str + The source code set of the input file. + to_code : str + The destination code set for the output file. + + Returns + ------- + bool + Indicate whether the conversion is successful or not. + + Raises + ------ + EncodeError + When any exception is raised during the conversion. + MoveFileError + When any exception is raised during moving files. 
""" src = self._validate_path(src) dest = self._validate_path(dest) @@ -306,18 +393,28 @@ def uss_convert_encoding(self, src, dest, from_code, to_code): def uss_convert_encoding_prev(self, src, dest, from_code, to_code): """For multiple files conversion, such as a USS path or MVS PDS data set, - use this method to split then do the conversion - - Arguments: - from_code: {str} -- The source code set of the input path - to_code: {str} -- The destination code set for the output path - src: {str} -- The input uss path or a file - dest: {str} -- The output uss path or a file - - Raises: - EncodeError: When direcotry is empty or copy multiple files to a single file - Returns: - boolean -- Indicate whether the conversion is successful or not + use this method to split then do the conversion. + + Parameters + ---------- + src : str + The input uss path or a file. + dest : str + The output uss path or a file. + from_code : str + The source code set of the input path. + to_code : str + The destination code set for the output path. + + Returns + ------- + bool + Indicate whether the conversion is successful or not. + + Raises + ------ + EncodeError + When directory is empty or copy multiple files to a single file. 
""" src = self._validate_path(src) dest = self._validate_path(dest) @@ -375,18 +472,28 @@ def mvs_convert_encoding( 2) MVS to USS 3) MVS to MVS - Arguments: - src: {str} -- The input MVS data set or USS path to be converted - dest: {str} -- The output MVS data set or USS path to be converted - from_code: {str} -- The source code set of the input MVS data set - to_code: {str} -- The destination code set of the output MVS data set - - Keyword Arguments: - src_type {[type]} -- The input MVS data set or type: PS, PDS, PDSE, VSAM(KSDS) (default: {None}) - dest_type {[type]} -- The output MVS data set type (default: {None}) - - Returns: - boolean -- Indicate whether the conversion is successful or not + Parameters + ---------- + src : str + The input MVS data set or USS path to be converted. + dest : str + The output MVS data set or USS path to be converted. + from_code : str + The source code set of the input MVS data set. + to_code : str + The destination code set of the output MVS data set. + + Keyword Parameters + ----------------- + src_type : str + The input MVS data set or type: PS, PDS, PDSE, VSAM(KSDS). + dest_type : str + The output MVS data set type. + + Returns + ------- + bool + Indicate whether the conversion is successful or not. """ src = self._validate_data_set_or_path(src) dest = self._validate_data_set_or_path(dest) @@ -458,11 +565,18 @@ def uss_tag_encoding(self, file_path, tag): """Tag the file/directory specified with the given code set. If `file_path` is a directory, all of the files and subdirectories will be tagged recursively. - Arguments: - file_path {str} -- Absolute file path to tag. - tag {str} -- Code set to tag the file/directory. - Raises: - TaggingError: When the chtag command fails. + + Parameters + ---------- + file_path : str + Absolute file path to tag. + tag : str + Code set to tag the file/directory. + + Raises + ------ + TaggingError + When the chtag command fails. 
""" is_dir = os.path.isdir(file_path) @@ -473,11 +587,18 @@ def uss_tag_encoding(self, file_path, tag): def uss_file_tag(self, file_path): """Returns the current tag set for a file. - Arguments: - file_path {str} -- USS path to the file. - Returns: - str -- Current tag set for the file, as returned by 'ls -T' - None -- If the file does not exist or the command fails. + + Parameters + ---------- + file_path : str + USS path to the file. + + Returns + ------- + str + Current tag set for the file, as returned by 'ls -T'. + None + If the file does not exist or the command fails. """ if not os.path.exists(file_path): return None @@ -500,12 +621,50 @@ def uss_file_tag(self, file_path): class EncodeError(Exception): def __init__(self, message): + """Error during encoding. + + Parameters + ---------- + message : str + Human readable string describing the exception. + + Attributes + ---------- + msg : str + Human readable string describing the exception. + """ self.msg = 'An error occurred during encoding: "{0}"'.format(message) super(EncodeError, self).__init__(self.msg) class TaggingError(Exception): def __init__(self, file_path, tag, rc, stdout, stderr): + """Error during tagging. + + Parameters + ---------- + file_path : str + File to tag. + tag : str + Tag to put in the file. + rc : int + Return code. + stdout : str + Standard output. + stderr : str + Standard error. + + Attributes + ---------- + msg : str + Human readable string describing the exception. + rc : int + Return code. + stdout : str + Standard output. + stderr : str + Standard error. + """ self.msg = 'An error occurred during tagging of {0} to {1}'.format( file_path, tag @@ -518,5 +677,21 @@ def __init__(self, file_path, tag, rc, stdout, stderr): class MoveFileError(Exception): def __init__(self, src, dest, e): + """Error while moving a file. + + Parameters + ---------- + src : str + From where the file moves. + dest : str + To where the file moves. + e : str + Exception message. 
+ + Attributes + ---------- + msg : str + Human readable string describing the exception. + """ self.msg = "Failed when moving {0} to {1}: {2}".format(src, dest, e) super().__init__(self.msg) From 5b239b1afe04ec4800b93e044f3857ebc10e0d0c Mon Sep 17 00:00:00 2001 From: Ivan Moreno Date: Tue, 16 Apr 2024 08:46:04 -0700 Subject: [PATCH 15/28] [v1.10.0] [Enabler] Standardization of choices in modules (#1388) * Update zos_archive choices * Update zos_backup_restore choices * Update zos_copy choices * Update zos_data_set choices * Update module docs * Update zos_job_submit choices * Update zos_mount choices * Update zos_unarchive choices * Fix zos_archive and update its tests This also includes major work on zos_data_set since half of the test suite for zos_archive depends on creating data sets. * Update zos_backup_restore tests * Update zos_blockinfile tests * Update more modules * Updated more tests * Update zos_unarchive and zos_mount * Update zos_backup_restore unit tests * Update zos_mvs_raw * Update zos_copy tests * Fix some sanity issues * Fix zos_copy KSDS test * Update zos_copy some more * Fix ZFS call * Update zos_unarchive tests * Add massive changelog fragment * Fix call to zos_data_set * Fix more test issues in zos_fetch * Fix zos_find tests * Generate updated docs --- .../fragments/1388-lowercase-choices.yml | 87 +++++ docs/source/modules/zos_apf.rst | 68 ++-- docs/source/modules/zos_apf.rst-e | 318 +++++++++++++++ docs/source/modules/zos_archive.rst | 102 ++--- docs/source/modules/zos_backup_restore.rst | 80 ++-- docs/source/modules/zos_blockinfile.rst | 52 +-- docs/source/modules/zos_copy.rst | 226 ++++++----- docs/source/modules/zos_data_set.rst | 222 +++++------ docs/source/modules/zos_encode.rst | 32 +- docs/source/modules/zos_fetch.rst | 18 +- docs/source/modules/zos_find.rst | 20 +- docs/source/modules/zos_gather_facts.rst | 14 +- docs/source/modules/zos_job_output.rst | 16 +- docs/source/modules/zos_job_query.rst | 20 +- 
docs/source/modules/zos_job_submit.rst | 95 +++-- docs/source/modules/zos_lineinfile.rst | 68 ++-- docs/source/modules/zos_mount.rst | 124 +++--- docs/source/modules/zos_mvs_raw.rst | 364 +++++++++--------- docs/source/modules/zos_operator.rst | 2 +- .../modules/zos_operator_action_query.rst | 20 +- docs/source/modules/zos_ping.rst | 8 +- docs/source/modules/zos_script.rst | 32 +- docs/source/modules/zos_tso_command.rst | 4 +- docs/source/modules/zos_unarchive.rst | 68 ++-- docs/source/modules/zos_volume_init.rst | 34 +- plugins/action/zos_copy.py | 12 +- plugins/action/zos_job_submit.py | 6 +- plugins/action/zos_unarchive.py | 6 +- plugins/module_utils/data_set.py | 2 +- plugins/modules/zos_archive.py | 84 ++-- plugins/modules/zos_backup_restore.py | 32 +- plugins/modules/zos_copy.py | 93 ++--- plugins/modules/zos_data_set.py | 354 +++++++++-------- plugins/modules/zos_job_submit.py | 52 +-- plugins/modules/zos_mount.py | 138 +++---- plugins/modules/zos_mvs_raw.py | 252 ++++++------ plugins/modules/zos_unarchive.py | 62 +-- .../modules/test_zos_archive_func.py | 90 ++--- .../modules/test_zos_backup_restore.py | 20 +- .../modules/test_zos_blockinfile_func.py | 18 +- .../functional/modules/test_zos_copy_func.py | 326 ++++++++-------- .../modules/test_zos_data_set_func.py | 80 ++-- .../modules/test_zos_encode_func.py | 16 +- .../functional/modules/test_zos_fetch_func.py | 32 +- .../functional/modules/test_zos_find_func.py | 16 +- .../modules/test_zos_job_output_func.py | 4 +- .../modules/test_zos_job_query_func.py | 8 +- .../modules/test_zos_job_submit_func.py | 58 +-- .../modules/test_zos_lineinfile_func.py | 17 +- .../functional/modules/test_zos_mount_func.py | 38 +- .../modules/test_zos_mvs_raw_func.py | 86 ++--- .../modules/test_zos_unarchive_func.py | 104 ++--- tests/unit/test_zos_backup_restore_unit.py | 2 +- tests/unit/test_zos_mvs_raw_unit.py | 80 ++-- 54 files changed, 2302 insertions(+), 1880 deletions(-) create mode 100644 
changelogs/fragments/1388-lowercase-choices.yml create mode 100644 docs/source/modules/zos_apf.rst-e diff --git a/changelogs/fragments/1388-lowercase-choices.yml b/changelogs/fragments/1388-lowercase-choices.yml new file mode 100644 index 000000000..0f14f42fe --- /dev/null +++ b/changelogs/fragments/1388-lowercase-choices.yml @@ -0,0 +1,87 @@ +breaking_changes: + - zos_archive - option ``terse_pack`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + Suboption ``type`` of ``dest_data_set`` no longer accepts uppercase + choices, users should replace them with lowercase ones. + Suboption ``space_type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_backup_restore - option ``space_type`` no longer accepts uppercase + choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_copy - suboption ``type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``space_type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_data_set - option ``type`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + Option ``space_type`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``record_format`` no longer accepts uppercase choices, users + should replace them with lowercase ones. 
+ Options inside ``batch`` no longer accept uppercase choices, users should + replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_job_submit - option ``location`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_mount - option ``fs_type`` no longer accepts uppercase choices, + users should replace them with lowercase ones. + Option ``unmount_opts`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``mount_opts`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``tag_untagged`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + Option ``automove`` no longer accepts uppercase choices, users + should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_mvs_raw - suboption ``type`` of ``dd_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboptions ``disposition_normal`` and ``disposition_abnormal`` of + ``dd_data_set`` no longer accept ``catlg`` and ``uncatlg`` as choices. + This also applies when defining a ``dd_data_set`` inside ``dd_concat``. + Suboption ``space_type`` of ``dd_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dd_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dd_unix`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Options inside ``dd_concat`` no longer accept uppercase choices, + users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). 
+ - zos_unarchive - suboption ``type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``space_type`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + Suboption ``record_format`` of ``dest_data_set`` no longer accepts + uppercase choices, users should replace them with lowercase ones. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + +trivial: + - zos_blockinfile - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_find - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_lineinfile - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_encode - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_fetch - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_job_output - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). + - zos_job_query - updated tests to use lowercase options when calling + another module in the collection. + (https://github.com/ansible-collections/ibm_zos_core/pull/1388). 
\ No newline at end of file diff --git a/docs/source/modules/zos_apf.rst b/docs/source/modules/zos_apf.rst index e9a55c007..73d616e76 100644 --- a/docs/source/modules/zos_apf.rst +++ b/docs/source/modules/zos_apf.rst @@ -37,7 +37,7 @@ library state - Ensure that the library is added ``state=present`` or removed ``state=absent``. + Ensure that the library is added \ :literal:`state=present`\ or removed \ :literal:`state=absent`\ . The APF list format has to be "DYNAMIC". @@ -58,24 +58,24 @@ force_dynamic volume - The identifier for the volume containing the library specified in the ``library`` parameter. The values must be one the following. + The identifier for the volume containing the library specified in the \ :literal:`library`\ parameter. The values must be one the following. 1. The volume serial number. - 2. Six asterisks (******), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + 2. Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. - 3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog. + 3. \*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. - If ``volume`` is not specified, ``library`` has to be cataloged. + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. | **required**: False | **type**: str sms - Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. - If ``sms=True``, ``volume`` value will be ignored. 
+ If \ :literal:`sms=True`\ , \ :literal:`volume`\ value will be ignored. | **required**: False | **type**: bool @@ -83,13 +83,13 @@ sms operation - Change APF list format to "DYNAMIC" ``operation=set_dynamic`` or "STATIC" ``operation=set_static`` + Change APF list format to "DYNAMIC" \ :literal:`operation=set\_dynamic`\ or "STATIC" \ :literal:`operation=set\_static`\ - Display APF list current format ``operation=check_format`` + Display APF list current format \ :literal:`operation=check\_format`\ - Display APF list entries when ``operation=list`` ``library``, ``volume`` and ``sms`` will be used as filters. + Display APF list entries when \ :literal:`operation=list`\ \ :literal:`library`\ , \ :literal:`volume`\ and \ :literal:`sms`\ will be used as filters. - If ``operation`` is not set, add or remove operation will be ignored. + If \ :literal:`operation`\ is not set, add or remove operation will be ignored. | **required**: False | **type**: str @@ -99,23 +99,23 @@ operation tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str persistent - Add/remove persistent entries to or from *data_set_name* + Add/remove persistent entries to or from \ :emphasis:`data\_set\_name`\ - ``library`` will not be persisted or removed if ``persistent=None`` + \ :literal:`library`\ will not be persisted or removed if \ :literal:`persistent=None`\ | **required**: False | **type**: dict data_set_name - The data set name used for persisting or removing a ``library`` from the APF list. + The data set name used for persisting or removing a \ :literal:`library`\ from the APF list. 
| **required**: True | **type**: str @@ -124,13 +124,13 @@ persistent marker The marker line template. - ``{mark}`` will be replaced with "BEGIN" and "END". + \ :literal:`{mark}`\ will be replaced with "BEGIN" and "END". - Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs. + Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs. - ``{mark}`` length may not exceed 72 characters. + \ :literal:`{mark}`\ length may not exceed 72 characters. - The timestamp () used in the default marker follows the '+%Y%m%d-%H%M%S' date format + The timestamp (\) used in the default marker follows the '+%Y%m%d-%H%M%S' date format | **required**: False | **type**: str @@ -138,9 +138,9 @@ persistent backup - Creates a backup file or backup data set for *data_set_name*, including the timestamp information to ensure that you retrieve the original APF list defined in *data_set_name*". + Creates a backup file or backup data set for \ :emphasis:`data\_set\_name`\ , including the timestamp information to ensure that you retrieve the original APF list defined in \ :emphasis:`data\_set\_name`\ ". - *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . The backup file name will be return on either success or failure of module execution such that data can be retrieved. @@ -152,11 +152,11 @@ persistent backup_name Specify the USS file name or data set name for the destination backup. - If the source *data_set_name* is a USS file or path, the backup_name name must be a file or path name, and the USS file or path must be an absolute path name. 
+ If the source \ :emphasis:`data\_set\_name`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name. - If the source is an MVS data set, the backup_name must be an MVS data set name. + If the source is an MVS data set, the backup\_name must be an MVS data set name. - If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. @@ -168,9 +168,9 @@ persistent batch A list of dictionaries for adding or removing libraries. - This is mutually exclusive with ``library``, ``volume``, ``sms`` + This is mutually exclusive with \ :literal:`library`\ , \ :literal:`volume`\ , \ :literal:`sms`\ - Can be used with ``persistent`` + Can be used with \ :literal:`persistent`\ | **required**: False | **type**: list @@ -185,24 +185,24 @@ batch volume - The identifier for the volume containing the library specified on the ``library`` parameter. The values must be one of the following. + The identifier for the volume containing the library specified on the \ :literal:`library`\ parameter. The values must be one of the following. 1. The volume serial number - 2. Six asterisks (******), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + 2. 
Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. - 3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog. + 3. \*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. - If ``volume`` is not specified, ``library`` has to be cataloged. + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. | **required**: False | **type**: str sms - Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. - If true ``volume`` will be ignored. + If true \ :literal:`volume`\ will be ignored. | **required**: False | **type**: bool @@ -283,9 +283,9 @@ Return Values stdout The stdout from ZOAU command apfadm. Output varies based on the type of operation. - state> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm + state\> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm - operation> stdout of operation options list> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] set_dynamic> Set to DYNAMIC set_static> Set to STATIC check_format> DYNAMIC or STATIC + operation\> stdout of operation options list\> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] 
set\_dynamic\> Set to DYNAMIC set\_static\> Set to STATIC check\_format\> DYNAMIC or STATIC | **returned**: always | **type**: str diff --git a/docs/source/modules/zos_apf.rst-e b/docs/source/modules/zos_apf.rst-e new file mode 100644 index 000000000..ec8e6824c --- /dev/null +++ b/docs/source/modules/zos_apf.rst-e @@ -0,0 +1,318 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_apf.py + +.. _zos_apf_module: + + +zos_apf -- Add or remove libraries to Authorized Program Facility (APF) +======================================================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Adds or removes libraries to Authorized Program Facility (APF). +- Manages APF statement persistent entries to a data set or data set member. +- Changes APF list format to "DYNAMIC" or "STATIC". +- Gets the current APF list entries. + + + + + +Parameters +---------- + + +library + The library name to be added or removed from the APF list. + + | **required**: False + | **type**: str + + +state + Ensure that the library is added \ :literal:`state=present`\ or removed \ :literal:`state=absent`\ . + + The APF list format has to be "DYNAMIC". + + | **required**: False + | **type**: str + | **default**: present + | **choices**: absent, present + + +force_dynamic + Will force the APF list format to "DYNAMIC" before adding or removing libraries. + + If the format is "STATIC", the format will be changed to "DYNAMIC". + + | **required**: False + | **type**: bool + | **default**: False + + +volume + The identifier for the volume containing the library specified in the \ :literal:`library`\ parameter. The values must be one the following. + + 1. The volume serial number. + + 2. Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + + 3. 
\*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. + + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. + + | **required**: False + | **type**: str + + +sms + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + + If \ :literal:`sms=True`\ , \ :literal:`volume`\ value will be ignored. + + | **required**: False + | **type**: bool + | **default**: False + + +operation + Change APF list format to "DYNAMIC" \ :literal:`operation=set\_dynamic`\ or "STATIC" \ :literal:`operation=set\_static`\ + + Display APF list current format \ :literal:`operation=check\_format`\ + + Display APF list entries when \ :literal:`operation=list`\ \ :literal:`library`\ , \ :literal:`volume`\ and \ :literal:`sms`\ will be used as filters. + + If \ :literal:`operation`\ is not set, add or remove operation will be ignored. + + | **required**: False + | **type**: str + | **choices**: set_dynamic, set_static, check_format, list + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + + | **required**: False + | **type**: str + + +persistent + Add/remove persistent entries to or from \ :emphasis:`data\_set\_name`\ + + \ :literal:`library`\ will not be persisted or removed if \ :literal:`persistent=None`\ + + | **required**: False + | **type**: dict + + + data_set_name + The data set name used for persisting or removing a \ :literal:`library`\ from the APF list. + + | **required**: True + | **type**: str + + + marker + The marker line template. + + \ :literal:`{mark}`\ will be replaced with "BEGIN" and "END". 
+ + Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs. + + \ :literal:`{mark}`\ length may not exceed 72 characters. + + The timestamp (\) used in the default marker follows the '+%Y%m%d-%H%M%S' date format + + | **required**: False + | **type**: str + | **default**: /* {mark} ANSIBLE MANAGED BLOCK */ + + + backup + Creates a backup file or backup data set for \ :emphasis:`data\_set\_name`\ , including the timestamp information to ensure that you retrieve the original APF list defined in \ :emphasis:`data\_set\_name`\ ". + + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . + + The backup file name will be return on either success or failure of module execution such that data can be retrieved. + + | **required**: False + | **type**: bool + | **default**: False + + + backup_name + Specify the USS file name or data set name for the destination backup. + + If the source \ :emphasis:`data\_set\_name`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name. + + If the source is an MVS data set, the backup\_name must be an MVS data set name. + + If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . + + If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. + + | **required**: False + | **type**: str + + + +batch + A list of dictionaries for adding or removing libraries. 
+ + This is mutually exclusive with \ :literal:`library`\ , \ :literal:`volume`\ , \ :literal:`sms`\ + + Can be used with \ :literal:`persistent`\ + + | **required**: False + | **type**: list + | **elements**: dict + + + library + The library name to be added or removed from the APF list. + + | **required**: True + | **type**: str + + + volume + The identifier for the volume containing the library specified on the \ :literal:`library`\ parameter. The values must be one of the following. + + 1. The volume serial number + + 2. Six asterisks (\*\*\*\*\*\*), indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + + 3. \*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog. + + If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged. + + | **required**: False + | **type**: str + + + sms + Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + + If true \ :literal:`volume`\ will be ignored. + + | **required**: False + | **type**: bool + | **default**: False + + + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + - name: Add a library to the APF list + zos_apf: + library: SOME.SEQUENTIAL.DATASET + volume: T12345 + - name: Add a library (cataloged) to the APF list and persistence + zos_apf: + library: SOME.SEQUENTIAL.DATASET + force_dynamic: True + persistent: + data_set_name: SOME.PARTITIONED.DATASET(MEM) + - name: Remove a library from the APF list and persistence + zos_apf: + state: absent + library: SOME.SEQUENTIAL.DATASET + volume: T12345 + persistent: + data_set_name: SOME.PARTITIONED.DATASET(MEM) + - name: Batch libraries with custom marker, persistence for the APF list + zos_apf: + persistent: + data_set_name: "SOME.PARTITIONED.DATASET(MEM)" + marker: "/* {mark} PROG001 USR0010 */" + batch: + - library: SOME.SEQ.DS1 + - library: SOME.SEQ.DS2 + sms: True + - library: SOME.SEQ.DS3 + volume: T12345 + - name: Print the APF list matching library pattern or volume serial number + zos_apf: + operation: list + library: SOME.SEQ.* + volume: T12345 + - name: Set the APF list format to STATIC + zos_apf: + operation: set_static + + + + +Notes +----- + +.. note:: + It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF® FACILITY resource class. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. + + To add or delete the APF list entry for library libname, you must have UPDATE authority to the RACF® FACILITY resource class entity CSVAPF.libname, or there must be no FACILITY class profile that protects that entity. + + To change the format of the APF list to dynamic, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.DYNAMIC, or there must be no FACILITY class profile that protects that entity. 
+ + To change the format of the APF list back to static, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.STATIC, or there must be no FACILITY class profile that protects that entity. + + + + + + + +Return Values +------------- + + +stdout + The stdout from ZOAU command apfadm. Output varies based on the type of operation. + + state\> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm + + operation\> stdout of operation options list\> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] set\_dynamic\> Set to DYNAMIC set\_static\> Set to STATIC check\_format\> DYNAMIC or STATIC + + | **returned**: always + | **type**: str + +stderr + The error messages from ZOAU command apfadm + + | **returned**: always + | **type**: str + | **sample**: BGYSC1310E ADD Error: Dataset COMMON.LINKLIB volume COMN01 is already present in APF list. + +rc + The return code from ZOAU command apfadm + + | **returned**: always + | **type**: int + +msg + The module messages + + | **returned**: failure + | **type**: str + | **sample**: Parameter verification failed + +backup_name + Name of the backup file or data set that was created. + + | **returned**: if backup=true, always + | **type**: str + diff --git a/docs/source/modules/zos_archive.rst b/docs/source/modules/zos_archive.rst index fe93474f0..3249f3ba8 100644 --- a/docs/source/modules/zos_archive.rst +++ b/docs/source/modules/zos_archive.rst @@ -20,7 +20,7 @@ Synopsis - Sources for archiving must be on the remote z/OS system. - Supported sources are USS (UNIX System Services) or z/OS data sets. - The archive remains on the remote z/OS system. -- For supported archive formats, see option ``format``. +- For supported archive formats, see option \ :literal:`format`\ . @@ -35,7 +35,7 @@ src USS file paths should be absolute paths. 
- MVS data sets supported types are: ``SEQ``, ``PDS``, ``PDSE``. + MVS data sets supported types are: \ :literal:`SEQ`\ , \ :literal:`PDS`\ , \ :literal:`PDSE`\ . VSAMs are not supported. @@ -68,7 +68,7 @@ format terse_pack - Compression option for use with the terse format, *name=terse*. + Compression option for use with the terse format, \ :emphasis:`name=terse`\ . Pack will compress records in a data set so that the output results in lossless data compression. @@ -78,7 +78,7 @@ format | **required**: False | **type**: str - | **choices**: PACK, SPACK + | **choices**: pack, spack xmit_log_data_set @@ -88,14 +88,14 @@ format If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB - When providing the *xmit_log_data_set* name, ensure there is adequate space. + When providing the \ :emphasis:`xmit\_log\_data\_set`\ name, ensure there is adequate space. | **required**: False | **type**: str use_adrdssu - If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using ``xmit`` or ``terse``. + If set to true, the \ :literal:`zos\_archive`\ module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using \ :literal:`xmit`\ or \ :literal:`terse`\ . | **required**: False | **type**: bool @@ -107,19 +107,19 @@ format dest The remote absolute path or data set where the archive should be created. - *dest* can be a USS file or MVS data set name. + \ :emphasis:`dest`\ can be a USS file or MVS data set name. - If *dest* has missing parent directories, they will be created. + If \ :emphasis:`dest`\ has missing parent directories, they will be created. - If *dest* is a nonexistent USS file, it will be created. + If \ :emphasis:`dest`\ is a nonexistent USS file, it will be created. 
- If *dest* is an existing file or data set and *force=true*, the existing *dest* will be deleted and recreated with attributes defined in the *dest_data_set* option or computed by the module. + If \ :emphasis:`dest`\ is an existing file or data set and \ :emphasis:`force=true`\ , the existing \ :emphasis:`dest`\ will be deleted and recreated with attributes defined in the \ :emphasis:`dest\_data\_set`\ option or computed by the module. - If *dest* is an existing file or data set and *force=false* or not specified, the module exits with a note to the user. + If \ :emphasis:`dest`\ is an existing file or data set and \ :emphasis:`force=false`\ or not specified, the module exits with a note to the user. - Destination data set attributes can be set using *dest_data_set*. + Destination data set attributes can be set using \ :emphasis:`dest\_data\_set`\ . - Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the *dest_data_set* option will improve performance. + Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the \ :emphasis:`dest\_data\_set`\ option will improve performance. | **required**: True | **type**: str @@ -128,9 +128,9 @@ dest exclude Remote absolute path, glob, or list of paths, globs or data set name patterns for the file, files or data sets to exclude from src list and glob expansion. - Patterns (wildcards) can contain one of the following, `?`, `*`. + Patterns (wildcards) can contain one of the following, \`?\`, \`\*\`. - * matches everything. + \* matches everything. ? matches any single character. 
@@ -144,7 +144,7 @@ group When left unspecified, it uses the current group of the current use unless you are root, in which case it can preserve the previous ownership. - This option is only applicable if ``dest`` is USS, otherwise ignored. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str @@ -153,13 +153,13 @@ group mode The permission of the destination archive file. - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored. - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ )or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. The mode may also be specified as a symbolic mode (for example, 'u+rwx' or 'u=rw,g=r,o=r') or a special string 'preserve'. - *mode=preserve* means that the file will be given the same permissions as the src file. + \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the src file. 
| **required**: False | **type**: str @@ -170,14 +170,14 @@ owner When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. - This option is only applicable if ``dest`` is USS, otherwise ignored. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str remove - Remove any added source files , trees or data sets after module `zos_archive <./zos_archive.html>`_ adds them to the archive. Source files, trees and data sets are identified with option *src*. + Remove any added source files , trees or data sets after module \ `zos\_archive <./zos_archive.html>`__\ adds them to the archive. Source files, trees and data sets are identified with option \ :emphasis:`src`\ . | **required**: False | **type**: bool @@ -185,7 +185,7 @@ remove dest_data_set - Data set attributes to customize a ``dest`` data set to be archived into. + Data set attributes to customize a \ :literal:`dest`\ data set to be archived into. | **required**: False | **type**: dict @@ -203,23 +203,23 @@ dest_data_set | **required**: False | **type**: str - | **default**: SEQ - | **choices**: SEQ + | **default**: seq + | **choices**: seq space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the secondary space allocated for the data set. - The unit of space used is set using *space_type*. 
+ The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -228,21 +228,21 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``FB``) + If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`FB`\ ) - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str - | **choices**: FB, VB, FBA, VBA, U + | **choices**: fb, vb, fba, vba, u record_length @@ -313,18 +313,18 @@ dest_data_set tmp_hlq Override the default high level qualifier (HLQ) for temporary data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str force - If set to ``true`` and the remote file or data set ``dest`` will be deleted. Otherwise it will be created with the ``dest_data_set`` attributes or default values if ``dest_data_set`` is not specified. + If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ will be deleted. Otherwise it will be created with the \ :literal:`dest\_data\_set`\ attributes or default values if \ :literal:`dest\_data\_set`\ is not specified. 
- If set to ``false``, the file or data set will only be copied if the destination does not exist. + If set to \ :literal:`false`\ , the file or data set will only be copied if the destination does not exist. - If set to ``false`` and destination exists, the module exits with a note to the user. + If set to \ :literal:`false`\ and destination exists, the module exits with a note to the user. | **required**: False | **type**: bool @@ -373,7 +373,7 @@ Examples format: name: terse format_options: - terse_pack: "SPACK" + terse_pack: "spack" use_adrdssu: True # Use a pattern to store @@ -392,11 +392,11 @@ Notes ----- .. note:: - This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos_fetch to retrieve to the controller and then zos_copy or zos_unarchive for copying to a remote or send to the remote and then unpack the archive respectively. + This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos\_fetch to retrieve to the controller and then zos\_copy or zos\_unarchive for copying to a remote or send to the remote and then unpack the archive respectively. - When packing and using ``use_adrdssu`` flag the module will take up to two times the space indicated in ``dest_data_set``. + When packing and using \ :literal:`use\_adrdssu`\ flag the module will take up to two times the space indicated in \ :literal:`dest\_data\_set`\ . - tar, zip, bz2 and pax are archived using python ``tarfile`` library which uses the latest version available for each format, for compatibility when opening from system make sure to use the latest available version for the intended format. + tar, zip, bz2 and pax are archived using python \ :literal:`tarfile`\ library which uses the latest version available for each format, for compatibility when opening from system make sure to use the latest available version for the intended format. 
@@ -416,27 +416,27 @@ Return Values state - The state of the input ``src``. + The state of the input \ :literal:`src`\ . - ``absent`` when the source files or data sets were removed. + \ :literal:`absent`\ when the source files or data sets were removed. - ``present`` when the source files or data sets were not removed. + \ :literal:`present`\ when the source files or data sets were not removed. - ``incomplete`` when ``remove`` was true and the source files or data sets were not removed. + \ :literal:`incomplete`\ when \ :literal:`remove`\ was true and the source files or data sets were not removed. | **returned**: always | **type**: str dest_state - The state of the *dest* file or data set. + The state of the \ :emphasis:`dest`\ file or data set. - ``absent`` when the file does not exist. + \ :literal:`absent`\ when the file does not exist. - ``archive`` when the file is an archive. + \ :literal:`archive`\ when the file is an archive. - ``compress`` when the file is compressed, but not an archive. + \ :literal:`compress`\ when the file is compressed, but not an archive. - ``incomplete`` when the file is an archive, but some files under *src* were not found. + \ :literal:`incomplete`\ when the file is an archive, but some files under \ :emphasis:`src`\ were not found. | **returned**: success | **type**: str @@ -454,7 +454,7 @@ archived | **type**: list arcroot - If ``src`` is a list of USS files, this returns the top most parent folder of the list of files, otherwise is empty. + If \ :literal:`src`\ is a list of USS files, this returns the top most parent folder of the list of files, otherwise is empty. 
| **returned**: always | **type**: str diff --git a/docs/source/modules/zos_backup_restore.rst b/docs/source/modules/zos_backup_restore.rst index d70efc7a1..6833279fa 100644 --- a/docs/source/modules/zos_backup_restore.rst +++ b/docs/source/modules/zos_backup_restore.rst @@ -47,34 +47,34 @@ data_sets include - When *operation=backup*, specifies a list of data sets or data set patterns to include in the backup. + When \ :emphasis:`operation=backup`\ , specifies a list of data sets or data set patterns to include in the backup. - When *operation=restore*, specifies a list of data sets or data set patterns to include when restoring from a backup. + When \ :emphasis:`operation=restore`\ , specifies a list of data sets or data set patterns to include when restoring from a backup. - The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate to DFSMSdss that only part of a qualifier has been specified. + The single asterisk, \ :literal:`\*`\ , is used in place of exactly one qualifier. In addition, it can be used to indicate to DFSMSdss that only part of a qualifier has been specified. - When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. + When used with other qualifiers, the double asterisk, \ :literal:`\*\*`\ , indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. - A question mark ``?`` or percent sign ``%`` matches a single character. + A question mark \ :literal:`?`\ or percent sign \ :literal:`%`\ matches a single character. 
| **required**: False | **type**: raw exclude - When *operation=backup*, specifies a list of data sets or data set patterns to exclude from the backup. + When \ :emphasis:`operation=backup`\ , specifies a list of data sets or data set patterns to exclude from the backup. - When *operation=restore*, specifies a list of data sets or data set patterns to exclude when restoring from a backup. + When \ :emphasis:`operation=restore`\ , specifies a list of data sets or data set patterns to exclude when restoring from a backup. - The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate that only part of a qualifier has been specified." + The single asterisk, \ :literal:`\*`\ , is used in place of exactly one qualifier. In addition, it can be used to indicate that only part of a qualifier has been specified." - When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. + When used with other qualifiers, the double asterisk, \ :literal:`\*\*`\ , indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. - A question mark ``?`` or percent sign ``%`` matches a single character. + A question mark \ :literal:`?`\ or percent sign \ :literal:`%`\ matches a single character. | **required**: False | **type**: raw @@ -84,22 +84,22 @@ data_sets volume This applies to both data set restores and volume restores. - When *operation=backup* and *data_sets* are provided, specifies the volume that contains the data sets to backup. + When \ :emphasis:`operation=backup`\ and \ :emphasis:`data\_sets`\ are provided, specifies the volume that contains the data sets to backup. 
- When *operation=restore*, specifies the volume the backup should be restored to. + When \ :emphasis:`operation=restore`\ , specifies the volume the backup should be restored to. - *volume* is required when restoring a full volume backup. + \ :emphasis:`volume`\ is required when restoring a full volume backup. | **required**: False | **type**: str full_volume - When *operation=backup* and *full_volume=True*, specifies that the entire volume provided to *volume* should be backed up. + When \ :emphasis:`operation=backup`\ and \ :emphasis:`full\_volume=True`\ , specifies that the entire volume provided to \ :emphasis:`volume`\ should be backed up. - When *operation=restore* and *full_volume=True*, specifies that the volume should be restored (default is dataset). + When \ :emphasis:`operation=restore`\ and \ :emphasis:`full\_volume=True`\ , specifies that the volume should be restored (default is dataset). - *volume* must be provided when *full_volume=True*. + \ :emphasis:`volume`\ must be provided when \ :emphasis:`full\_volume=True`\ . | **required**: False | **type**: bool @@ -109,18 +109,18 @@ full_volume temp_volume Specifies a particular volume on which the temporary data sets should be created during the backup and restore process. - When *operation=backup* and *backup_name* is a data set, specifies the volume the backup should be placed in. + When \ :emphasis:`operation=backup`\ and \ :emphasis:`backup\_name`\ is a data set, specifies the volume the backup should be placed in. | **required**: False | **type**: str backup_name - When *operation=backup*, the destination data set or UNIX file to hold the backup. + When \ :emphasis:`operation=backup`\ , the destination data set or UNIX file to hold the backup. - When *operation=restore*, the destination data set or UNIX file backup to restore. + When \ :emphasis:`operation=restore`\ , the destination data set or UNIX file backup to restore. - There are no enforced conventions for backup names. 
However, using a common extension like ``.dzp`` for UNIX files and ``.DZP`` for data sets will improve readability. + There are no enforced conventions for backup names. However, using a common extension like \ :literal:`.dzp`\ for UNIX files and \ :literal:`.DZP`\ for data sets will improve readability. | **required**: True | **type**: str @@ -135,9 +135,9 @@ recover overwrite - When *operation=backup*, specifies if an existing data set or UNIX file matching *backup_name* should be deleted. + When \ :emphasis:`operation=backup`\ , specifies if an existing data set or UNIX file matching \ :emphasis:`backup\_name`\ should be deleted. - When *operation=restore*, specifies if the module should overwrite existing data sets with matching name on the target device. + When \ :emphasis:`operation=restore`\ , specifies if the module should overwrite existing data sets with matching name on the target device. | **required**: False | **type**: bool @@ -145,35 +145,35 @@ overwrite sms_storage_class - When *operation=restore*, specifies the storage class to use. The storage class will also be used for temporary data sets created during restore process. + When \ :emphasis:`operation=restore`\ , specifies the storage class to use. The storage class will also be used for temporary data sets created during restore process. - When *operation=backup*, specifies the storage class to use for temporary data sets created during backup process. + When \ :emphasis:`operation=backup`\ , specifies the storage class to use for temporary data sets created during backup process. - If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. + If neither of \ :emphasis:`sms\_storage\_class`\ or \ :emphasis:`sms\_management\_class`\ are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. 
| **required**: False | **type**: str sms_management_class - When *operation=restore*, specifies the management class to use. The management class will also be used for temporary data sets created during restore process. + When \ :emphasis:`operation=restore`\ , specifies the management class to use. The management class will also be used for temporary data sets created during restore process. - When *operation=backup*, specifies the management class to use for temporary data sets created during backup process. + When \ :emphasis:`operation=backup`\ , specifies the management class to use for temporary data sets created during backup process. - If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. + If neither of \ :emphasis:`sms\_storage\_class`\ or \ :emphasis:`sms\_management\_class`\ are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. | **required**: False | **type**: str space - If *operation=backup*, specifies the amount of space to allocate for the backup. Please note that even when backing up to a UNIX file, backup contents will be temporarily held in a data set. + If \ :emphasis:`operation=backup`\ , specifies the amount of space to allocate for the backup. Please note that even when backing up to a UNIX file, backup contents will be temporarily held in a data set. - If *operation=restore*, specifies the amount of space to allocate for data sets temporarily created during the restore process. + If \ :emphasis:`operation=restore`\ , specifies the amount of space to allocate for data sets temporarily created during the restore process. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . 
- When *full_volume=True*, *space* defaults to ``1``, otherwise default is ``25`` + When \ :emphasis:`full\_volume=True`\ , \ :emphasis:`space`\ defaults to \ :literal:`1`\ , otherwise default is \ :literal:`25`\ | **required**: False | **type**: int @@ -182,13 +182,13 @@ space space_type The unit of measurement to use when defining data set space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . - When *full_volume=True*, *space_type* defaults to ``G``, otherwise default is ``M`` + When \ :emphasis:`full\_volume=True`\ , \ :emphasis:`space\_type`\ defaults to \ :literal:`g`\ , otherwise default is \ :literal:`m`\ | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk hlq @@ -203,7 +203,7 @@ hlq tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup data sets. - The default HLQ is the Ansible user that executes the module and if that is not available, then the value of ``TMPHLQ`` is used. + The default HLQ is the Ansible user that executes the module and if that is not available, then the value of \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -251,7 +251,7 @@ Examples include: user.** backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup all datasets matching the pattern USER.** that are present on the volume MYVOL1 to data set MY.BACKUP.DZP, @@ -263,7 +263,7 @@ Examples volume: MYVOL1 backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup an entire volume, MYVOL1, to the UNIX file /tmp/temp_backup.dzp, allocate 1GB for data sets used in backup process. @@ -273,7 +273,7 @@ Examples volume: MYVOL1 full_volume: yes space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. 
Use z/OS username as new HLQ. @@ -317,7 +317,7 @@ Examples full_volume: yes backup_name: MY.BACKUP.DZP space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. Specify DB2SMS10 for the SMS storage and management classes to use for the restored diff --git a/docs/source/modules/zos_blockinfile.rst b/docs/source/modules/zos_blockinfile.rst index f3eef5967..8cd6f756c 100644 --- a/docs/source/modules/zos_blockinfile.rst +++ b/docs/source/modules/zos_blockinfile.rst @@ -38,9 +38,9 @@ src state - Whether the block should be inserted or replaced using *state=present*. + Whether the block should be inserted or replaced using \ :emphasis:`state=present`\ . - Whether the block should be removed using *state=absent*. + Whether the block should be removed using \ :emphasis:`state=absent`\ . | **required**: False | **type**: str @@ -51,9 +51,9 @@ state marker The marker line template. - ``{mark}`` will be replaced with the values ``in marker_begin`` (default="BEGIN") and ``marker_end`` (default="END"). + \ :literal:`{mark}`\ will be replaced with the values \ :literal:`in marker\_begin`\ (default="BEGIN") and \ :literal:`marker\_end`\ (default="END"). - Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs. + Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs. | **required**: False | **type**: str @@ -63,7 +63,7 @@ marker block The text to insert inside the marker lines. - Multi-line can be separated by '\n'. + Multi-line can be separated by '\\n'. Any double-quotation marks will be removed. @@ -74,11 +74,11 @@ block insertafter If specified, the block will be inserted after the last match of the specified regular expression. - A special value ``EOF`` for inserting a block at the end of the file is available. 
+ A special value \ :literal:`EOF`\ for inserting a block at the end of the file is available. - If a specified regular expression has no matches, ``EOF`` will be used instead. + If a specified regular expression has no matches, \ :literal:`EOF`\ will be used instead. - Choices are EOF or '*regex*'. + Choices are EOF or '\*regex\*'. Default is EOF. @@ -89,18 +89,18 @@ insertafter insertbefore If specified, the block will be inserted before the last match of specified regular expression. - A special value ``BOF`` for inserting the block at the beginning of the file is available. + A special value \ :literal:`BOF`\ for inserting the block at the beginning of the file is available. If a specified regular expression has no matches, the block will be inserted at the end of the file. - Choices are BOF or '*regex*'. + Choices are BOF or '\*regex\*'. | **required**: False | **type**: str marker_begin - This will be inserted at ``{mark}`` in the opening ansible block marker. + This will be inserted at \ :literal:`{mark}`\ in the opening ansible block marker. | **required**: False | **type**: str @@ -108,7 +108,7 @@ marker_begin marker_end - This will be inserted at ``{mark}`` in the closing ansible block marker. + This will be inserted at \ :literal:`{mark}`\ in the closing ansible block marker. | **required**: False | **type**: str @@ -116,9 +116,9 @@ marker_end backup - Specifies whether a backup of destination should be created before editing the source *src*. + Specifies whether a backup of destination should be created before editing the source \ :emphasis:`src`\ . - When set to ``true``, the module creates a backup file or data set. + When set to \ :literal:`true`\ , the module creates a backup file or data set. The backup file name will be returned on either success or failure of module execution such that data can be retrieved. @@ -130,15 +130,15 @@ backup backup_name Specify the USS file name or data set name for the destination backup. 
- If the source *src* is a USS file or path, the backup_name name must be a file or path name, and the USS file or path must be an absolute path name. + If the source \ :emphasis:`src`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name. - If the source is an MVS data set, the backup_name name must be an MVS data set name, and the dataset must not be preallocated. + If the source is an MVS data set, the backup\_name name must be an MVS data set name, and the dataset must not be preallocated. - If the backup_name is not provided, the default backup_name name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default backup\_name name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. - If *src* is a data set member and backup_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. + If \ :emphasis:`src`\ is a data set member and backup\_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. | **required**: False | **type**: str @@ -147,14 +147,14 @@ backup_name tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. 
+ The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str encoding - The character set of the source *src*. `zos_blockinfile <./zos_blockinfile.html>`_ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. + The character set of the source \ :emphasis:`src`\ . \ `zos\_blockinfile <./zos_blockinfile.html>`__\ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -168,7 +168,7 @@ force This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read. - The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :literal:`force`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . | **required**: False | **type**: bool @@ -290,13 +290,13 @@ Notes .. note:: It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. - All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets. + All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. 
The \ `zos\_data\_set <./zos_data_set.html>`__\ module can be used to catalog uncataloged data sets. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . - When using ``with_*`` loops be aware that if you do not set a unique mark the block will be overwritten on each iteration. + When using \`\`with\_\*\`\` loops be aware that if you do not set a unique mark the block will be overwritten on each iteration. - When more then one block should be handled in a file you must change the *marker* per task. + When more then one block should be handled in a file you must change the \ :emphasis:`marker`\ per task. @@ -315,7 +315,7 @@ Return Values changed - Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`. + Indicates if the source was modified. Value of 1 represents \`true\`, otherwise \`false\`. | **returned**: success | **type**: bool diff --git a/docs/source/modules/zos_copy.rst b/docs/source/modules/zos_copy.rst index 00e274b00..5ea5bf3ef 100644 --- a/docs/source/modules/zos_copy.rst +++ b/docs/source/modules/zos_copy.rst @@ -16,7 +16,7 @@ zos_copy -- Copy data to z/OS Synopsis -------- -- The `zos_copy <./zos_copy.html>`_ module copies a file or data set from a local or a remote machine to a location on the remote machine. +- The \ `zos\_copy <./zos_copy.html>`__\ module copies a file or data set from a local or a remote machine to a location on the remote machine. @@ -27,17 +27,17 @@ Parameters asa_text - If set to ``true``, indicates that either ``src`` or ``dest`` or both contain ASA control characters. + If set to \ :literal:`true`\ , indicates that either \ :literal:`src`\ or \ :literal:`dest`\ or both contain ASA control characters. - When ``src`` is a USS file and ``dest`` is a data set, the copy will preserve ASA control characters in the destination. 
+ When \ :literal:`src`\ is a USS file and \ :literal:`dest`\ is a data set, the copy will preserve ASA control characters in the destination. - When ``src`` is a data set containing ASA control characters and ``dest`` is a USS file, the copy will put all control characters as plain text in the destination. + When \ :literal:`src`\ is a data set containing ASA control characters and \ :literal:`dest`\ is a USS file, the copy will put all control characters as plain text in the destination. - If ``dest`` is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA). + If \ :literal:`dest`\ is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA). - If neither ``src`` or ``dest`` have record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail. + If neither \ :literal:`src`\ or \ :literal:`dest`\ have record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail. - This option is only valid for text files. If ``is_binary`` is ``true`` or ``executable`` is ``true`` as well, the module will fail. + This option is only valid for text files. If \ :literal:`is\_binary`\ is \ :literal:`true`\ or \ :literal:`executable`\ is \ :literal:`true`\ as well, the module will fail. | **required**: False | **type**: bool @@ -47,7 +47,7 @@ asa_text backup Specifies whether a backup of the destination should be created before copying data. - When set to ``true``, the module creates a backup file or data set. + When set to \ :literal:`true`\ , the module creates a backup file or data set. The backup file name will be returned on either success or failure of module execution such that data can be retrieved. @@ -59,24 +59,24 @@ backup backup_name Specify a unique USS file name or data set name for the destination backup. 
- If the destination ``dest`` is a USS file or path, the ``backup_name`` must be an absolute path name. + If the destination \ :literal:`dest`\ is a USS file or path, the \ :literal:`backup\_name`\ must be an absolute path name. - If the destination is an MVS data set name, the ``backup_name`` provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods. + If the destination is an MVS data set name, the \ :literal:`backup\_name`\ provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods. - If the ``backup_name`` is not provided, the default ``backup_name`` will be used. If the ``dest`` is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. If the ``dest`` is an MVS data set, it will be a data set with a randomly generated name. + If the \ :literal:`backup\_name`\ is not provided, the default \ :literal:`backup\_name`\ will be used. If the \ :literal:`dest`\ is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the \ :literal:`dest`\ is an MVS data set, it will be a data set with a randomly generated name. - If ``dest`` is a data set member and ``backup_name`` is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. + If \ :literal:`dest`\ is a data set member and \ :literal:`backup\_name`\ is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. | **required**: False | **type**: str content - When used instead of ``src``, sets the contents of a file or data set directly to the specified value. 
+ When used instead of \ :literal:`src`\ , sets the contents of a file or data set directly to the specified value. - Works only when ``dest`` is a USS file, sequential data set, or a partitioned data set member. + Works only when \ :literal:`dest`\ is a USS file, sequential data set, or a partitioned data set member. - If ``dest`` is a directory, then content will be copied to ``/path/to/dest/inline_copy``. + If \ :literal:`dest`\ is a directory, then content will be copied to \ :literal:`/path/to/dest/inline\_copy`\ . | **required**: False | **type**: str @@ -85,27 +85,27 @@ content dest The remote absolute path or data set where the content should be copied to. - ``dest`` can be a USS file, directory or MVS data set name. + \ :literal:`dest`\ can be a USS file, directory or MVS data set name. - If ``dest`` has missing parent directories, they will be created. + If \ :literal:`dest`\ has missing parent directories, they will be created. - If ``dest`` is a nonexistent USS file, it will be created. + If \ :literal:`dest`\ is a nonexistent USS file, it will be created. - If ``dest`` is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the copy will fail. + If \ :literal:`dest`\ is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the module will fail. - If ``dest`` is a nonexistent data set, it will be created following the process outlined here and in the ``volume`` option. 
+ If \ :literal:`dest`\ is a nonexistent data set, it will be created following the process outlined here and in the \ :literal:`volume`\ option. - If ``dest`` is a nonexistent data set, the attributes assigned will depend on the type of ``src``. If ``src`` is a USS file, ``dest`` will have a Fixed Block (FB) record format and the remaining attributes will be computed. If *is_binary=true*, ``dest`` will have a Fixed Block (FB) record format with a record length of 80, block size of 32760, and the remaining attributes will be computed. If *executable=true*,``dest`` will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed. + If \ :literal:`dest`\ is a nonexistent data set, the attributes assigned will depend on the type of \ :literal:`src`\ . If \ :literal:`src`\ is a USS file, \ :literal:`dest`\ will have a Fixed Block (FB) record format and the remaining attributes will be computed. If \ :emphasis:`is\_binary=true`\ , \ :literal:`dest`\ will have a Fixed Block (FB) record format with a record length of 80, block size of 32760, and the remaining attributes will be computed. If \ :emphasis:`executable=true`\ ,\ :literal:`dest`\ will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed. - When ``dest`` is a data set, precedence rules apply. If ``dest_data_set`` is set, this will take precedence over an existing data set. If ``dest`` is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precendent rule has been exercised, ``dest`` will be created with the same attributes of ``src``. + When \ :literal:`dest`\ is a data set, precedence rules apply. If \ :literal:`dest\_data\_set`\ is set, this will take precedence over an existing data set. 
If \ :literal:`dest`\ is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precedent rule has been exercised, \ :literal:`dest`\ will be created with the same attributes of \ :literal:`src`\ . - When the ``dest`` is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. The VSAM (KSDS) or VSAM (ESDS) ``dest`` will be deleted and recreated following the process outlined in the ``volume`` option. + When the \ :literal:`dest`\ is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. The VSAM (KSDS) or VSAM (ESDS) \ :literal:`dest`\ will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option. - When the ``dest`` is an existing VSAM (RRDS), then the source must be an RRDS. The VSAM (RRDS) will be deleted and recreated following the process outlined in the ``volume`` option. + When the \ :literal:`dest`\ is an existing VSAM (RRDS), then the source must be an RRDS. The VSAM (RRDS) will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option. - When ``dest`` is and existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the ``volume`` option. + When \ :literal:`dest`\ is an existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option. - When ``dest`` is a data set, you can override storage management rules by specifying ``volume`` if the storage class being used has GUARANTEED_SPACE=YES specified, otherwise, the allocation will fail. See ``volume`` for more volume related processes.
+ When \ :literal:`dest`\ is a data set, you can override storage management rules by specifying \ :literal:`volume`\ if the storage class being used has GUARANTEED\_SPACE=YES specified, otherwise, the allocation will fail. See \ :literal:`volume`\ for more volume related processes. | **required**: True | **type**: str @@ -114,9 +114,9 @@ dest encoding Specifies which encodings the destination file or data set should be converted from and to. - If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data. + If \ :literal:`encoding`\ is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data. - Only valid if ``is_binary`` is false. + Only valid if \ :literal:`is\_binary`\ is false. | **required**: False | **type**: dict @@ -132,7 +132,7 @@ encoding to The encoding to be converted to - | **required**: True + | **required**: False | **type**: str @@ -140,22 +140,22 @@ encoding tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str force - If set to ``true`` and the remote file or data set ``dest`` is empty, the ``dest`` will be reused. + If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ is empty, the \ :literal:`dest`\ will be reused. - If set to ``true`` and the remote file or data set ``dest`` is NOT empty, the ``dest`` will be deleted and recreated with the ``src`` data set attributes, otherwise it will be recreated with the ``dest`` data set attributes. 
+ If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ is NOT empty, the \ :literal:`dest`\ will be deleted and recreated with the \ :literal:`src`\ data set attributes, otherwise it will be recreated with the \ :literal:`dest`\ data set attributes. - To backup data before any deletion, see parameters ``backup`` and ``backup_name``. + To backup data before any deletion, see parameters \ :literal:`backup`\ and \ :literal:`backup\_name`\ . - If set to ``false``, the file or data set will only be copied if the destination does not exist. + If set to \ :literal:`false`\ , the file or data set will only be copied if the destination does not exist. - If set to ``false`` and destination exists, the module exits with a note to the user. + If set to \ :literal:`false`\ and destination exists, the module exits with a note to the user. | **required**: False | **type**: bool @@ -163,11 +163,11 @@ force force_lock - By default, when ``dest`` is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use ``force_lock`` to bypass this check and continue with copy. + By default, when \ :literal:`dest`\ is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use \ :literal:`force\_lock`\ to bypass this check and continue with copy. - If set to ``true`` and destination is a MVS data set opened by another process then zos_copy will try to copy using DISP=SHR. + If set to \ :literal:`true`\ and destination is a MVS data set opened by another process then zos\_copy will try to copy using DISP=SHR. - Using ``force_lock`` uses operations that are subject to race conditions and can lead to data loss, use with caution. + Using \ :literal:`force\_lock`\ uses operations that are subject to race conditions and can lead to data loss, use with caution. 
If a data set member has aliases, and is not a program object, copying that member to a dataset that is in use will result in the aliases not being preserved in the target dataset. When this scenario occurs the module will fail. @@ -177,9 +177,9 @@ force_lock ignore_sftp_stderr - During data transfer through SFTP, the module fails if the SFTP command directs any content to stderr. The user is able to override this behavior by setting this parameter to ``true``. By doing so, the module would essentially ignore the stderr stream produced by SFTP and continue execution. + During data transfer through SFTP, the module fails if the SFTP command directs any content to stderr. The user is able to override this behavior by setting this parameter to \ :literal:`true`\ . By doing so, the module would essentially ignore the stderr stream produced by SFTP and continue execution. - When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. + When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using \ :strong:`-vvvv`\ or through environment variables such as \ :strong:`verbosity = 4`\ , then this parameter will automatically be set to \ :literal:`true`\ . | **required**: False | **type**: bool @@ -187,11 +187,11 @@ ignore_sftp_stderr is_binary - If set to ``true``, indicates that the file or data set to be copied is a binary file or data set. + If set to \ :literal:`true`\ , indicates that the file or data set to be copied is a binary file or data set. - When *is_binary=true*, no encoding conversion is applied to the content, all content transferred retains the original state. + When \ :emphasis:`is\_binary=true`\ , no encoding conversion is applied to the content, all content transferred retains the original state. 
- Use *is_binary=true* when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program. + Use \ :emphasis:`is\_binary=true`\ when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program. | **required**: False | **type**: bool @@ -199,15 +199,15 @@ is_binary executable - If set to ``true``, indicates that the file or library to be copied is an executable. + If set to \ :literal:`true`\ , indicates that the file or library to be copied is an executable. - If the ``src`` executable has an alias, the alias information is also copied. If the ``dest`` is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library. + If the \ :literal:`src`\ executable has an alias, the alias information is also copied. If the \ :literal:`dest`\ is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library. - If *executable=true*, and ``dest`` is a data set, it must be a PDS or PDSE (library). + If \ :emphasis:`executable=true`\ , and \ :literal:`dest`\ is a data set, it must be a PDS or PDSE (library). - If ``dest`` is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed. + If \ :literal:`dest`\ is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed. - If ``dest`` is a file, execute permission for the user will be added to the file (``u+x``). + If \ :literal:`dest`\ is a file, execute permission for the user will be added to the file (\`\`u+x\`\`). 
| **required**: False | **type**: bool @@ -215,9 +215,9 @@ executable aliases - If set to ``true``, indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation. + If set to \ :literal:`true`\ , indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation. - Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when ``executable=True`` and ``dest`` is a USS file or directory, this option will be ignored. + Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when \ :literal:`executable=True`\ and \ :literal:`dest`\ is a USS file or directory, this option will be ignored. Copying of aliases for text-based data sets from USS sources or to USS destinations is not currently supported. @@ -234,25 +234,47 @@ local_follow | **default**: True +group + Name of the group that will own the file system objects. + + When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership. + + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. + + | **required**: False + | **type**: str + + mode The permission of the destination file or directory. - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored. + + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ )or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. 
Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + + The mode may also be specified as a symbolic mode (for example, \`\`u+rwx\`\` or \`\`u=rw,g=r,o=r\`\`) or a special string \`preserve\`. - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the source file. + + | **required**: False + | **type**: str + + +owner + Name of the user that should own the filesystem object, as would be passed to the chown command. - The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`. + When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. - *mode=preserve* means that the file will be given the same permissions as the source file. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str remote_src - If set to ``false``, the module searches for ``src`` at the local machine. + If set to \ :literal:`false`\ , the module searches for \ :literal:`src`\ at the local machine. - If set to ``true``, the module goes to the remote/target machine for ``src``. + If set to \ :literal:`true`\ , the module goes to the remote/target machine for \ :literal:`src`\ . | **required**: False | **type**: bool @@ -262,23 +284,23 @@ remote_src src Path to a file/directory or name of a data set to copy to remote z/OS system. 
- If ``remote_src`` is true, then ``src`` must be the path to a Unix System Services (USS) file, name of a data set, or data set member. + If \ :literal:`remote\_src`\ is true, then \ :literal:`src`\ must be the path to a Unix System Services (USS) file, name of a data set, or data set member. - If ``src`` is a local path or a USS path, it can be absolute or relative. + If \ :literal:`src`\ is a local path or a USS path, it can be absolute or relative. - If ``src`` is a directory, ``dest`` must be a partitioned data set or a USS directory. + If \ :literal:`src`\ is a directory, \ :literal:`dest`\ must be a partitioned data set or a USS directory. - If ``src`` is a file and ``dest`` ends with "/" or is a directory, the file is copied to the directory with the same filename as ``src``. + If \ :literal:`src`\ is a file and \ :literal:`dest`\ ends with "/" or is a directory, the file is copied to the directory with the same filename as \ :literal:`src`\ . - If ``src`` is a directory and ends with "/", the contents of it will be copied into the root of ``dest``. If it doesn't end with "/", the directory itself will be copied. + If \ :literal:`src`\ is a directory and ends with "/", the contents of it will be copied into the root of \ :literal:`dest`\ . If it doesn't end with "/", the directory itself will be copied. - If ``src`` is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. + If \ :literal:`src`\ is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. - If ``src`` is a VSAM data set, ``dest`` must also be a VSAM. + If \ :literal:`src`\ is a VSAM data set, \ :literal:`dest`\ must also be a VSAM. Wildcards can be used to copy multiple PDS/PDSE members to another PDS/PDSE. - Required unless using ``content``. + Required unless using \ :literal:`content`\ . 
| **required**: False | **type**: str @@ -295,22 +317,22 @@ validate volume - If ``dest`` does not exist, specify which volume ``dest`` should be allocated to. + If \ :literal:`dest`\ does not exist, specify which volume \ :literal:`dest`\ should be allocated to. Only valid when the destination is an MVS data set. The volume must already be present on the device. - If no volume is specified, storage management rules will be used to determine the volume where ``dest`` will be allocated. + If no volume is specified, storage management rules will be used to determine the volume where \ :literal:`dest`\ will be allocated. - If the storage administrator has specified a system default unit name and you do not set a ``volume`` name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. + If the storage administrator has specified a system default unit name and you do not set a \ :literal:`volume`\ name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. | **required**: False | **type**: str dest_data_set - Data set attributes to customize a ``dest`` data set to be copied into. + Data set attributes to customize a \ :literal:`dest`\ data set to be copied into. | **required**: False | **type**: dict @@ -321,22 +343,22 @@ dest_data_set | **required**: True | **type**: str - | **choices**: KSDS, ESDS, RRDS, LDS, SEQ, PDS, PDSE, MEMBER, BASIC, LIBRARY + | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, member, basic, library space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set. 
- The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the secondary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -345,21 +367,21 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``FB``) + If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`fb`\ ) - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str - | **choices**: FB, VB, FBA, VBA, U + | **choices**: fb, vb, fba, vba, u record_length @@ -390,9 +412,9 @@ dest_data_set key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -401,9 +423,9 @@ dest_data_set key_length The key length to use when creating a KSDS data set. - *key_length* is required when *type=KSDS*. 
+ \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -450,13 +472,13 @@ dest_data_set use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when ``src`` is a local file or directory. + Only valid when \ :literal:`src`\ is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ | **required**: False | **type**: bool @@ -466,9 +488,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . - These options are ignored unless ``use_template`` is true. 
+ These options are ignored unless \ :literal:`use\_template`\ is true. | **required**: False | **type**: dict @@ -547,7 +569,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block is removed. - Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. | **required**: False | **type**: bool @@ -743,11 +765,11 @@ Examples remote_src: true volume: '222222' dest_data_set: - type: SEQ + type: seq space_primary: 10 space_secondary: 3 - space_type: K - record_format: VB + space_type: k + record_format: vb record_length: 150 - name: Copy a Program Object and its aliases on a remote system to a new PDSE member MYCOBOL @@ -781,17 +803,17 @@ Notes .. note:: Destination data sets are assumed to be in catalog. When trying to copy to an uncataloged data set, the module assumes that the data set does not exist and will create it. - Destination will be backed up if either ``backup`` is ``true`` or ``backup_name`` is provided. If ``backup`` is ``false`` but ``backup_name`` is provided, task will fail. + Destination will be backed up if either \ :literal:`backup`\ is \ :literal:`true`\ or \ :literal:`backup\_name`\ is provided. If \ :literal:`backup`\ is \ :literal:`false`\ but \ :literal:`backup\_name`\ is provided, task will fail. When copying local files or directories, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file or directory being copied. Temporary files will always be deleted, regardless of success or failure of the copy task. VSAM data sets can only be copied to other VSAM data sets. 
- For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - Beginning in version 1.8.x, zos_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option ``executable`` that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos_copy.html) error. + Beginning in version 1.8.x, zos\_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option \ :literal:`executable`\ that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos\_copy.html) error. @@ -846,12 +868,12 @@ destination_attributes { "block_size": 32760, - "record_format": "FB", + "record_format": "fb", "record_length": 45, "space_primary": 2, "space_secondary": 1, - "space_type": "K", - "type": "PDSE" + "space_type": "k", + "type": "pdse" } block_size @@ -864,7 +886,7 @@ destination_attributes Record format of the dataset. | **type**: str - | **sample**: FB + | **sample**: fb record_length Record length of the dataset. @@ -888,17 +910,17 @@ destination_attributes Unit of measurement for space. | **type**: str - | **sample**: K + | **sample**: k type Type of dataset allocated. 
| **type**: str - | **sample**: PDSE + | **sample**: pdse checksum - SHA256 checksum of the file after running zos_copy. + SHA256 checksum of the file after running zos\_copy. | **returned**: When ``validate=true`` and if ``dest`` is USS | **type**: str diff --git a/docs/source/modules/zos_data_set.rst b/docs/source/modules/zos_data_set.rst index 0ea34875f..3300c7d40 100644 --- a/docs/source/modules/zos_data_set.rst +++ b/docs/source/modules/zos_data_set.rst @@ -28,11 +28,11 @@ Parameters name - The name of the data set being managed. (e.g ``USER.TEST``) + The name of the data set being managed. (e.g \ :literal:`USER.TEST`\ ) - If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. + If \ :emphasis:`name`\ is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - Required if *type=MEMBER* or *state!=present* and not using *batch*. + Required if \ :emphasis:`type=member`\ or \ :emphasis:`state!=present`\ and not using \ :emphasis:`batch`\ . | **required**: False | **type**: str @@ -41,49 +41,49 @@ name state The final state desired for specified data set. - If *state=absent* and the data set does not exist on the managed node, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and the data set does not exist on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and the data set does exist on the managed node, remove the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and the data set does exist on the managed node, remove the data set, module completes successfully with \ :emphasis:`changed=True`\ . 
- If *state=absent* and *type=MEMBER* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. + If \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ and \ :emphasis:`force=True`\ , the data set will be opened with \ :emphasis:`DISP=SHR`\ such that the entire data set can be accessed by other processes while the specified member is deleted. - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with \ :emphasis:`changed=True`\ . - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. 
If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided \ :emphasis:`volumes`\ . If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=True* and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and \ :emphasis:`replace=True`\ and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=False* and the data set is present on the managed node, no action taken, module completes successfully with *changed=False*. 
+ If \ :emphasis:`state=present`\ and \ :emphasis:`replace=False`\ and the data set is present on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and *type=MEMBER* and the member does not exist in the data set, create a member formatted to store data, module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. + If \ :emphasis:`state=present`\ and \ :emphasis:`type=member`\ and the member does not exist in the data set, create a member formatted to store data, module completes successfully with \ :emphasis:`changed=True`\ . Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. - If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is already cataloged, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, module completes successfully with *changed=True*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, module completes successfully with \ :emphasis:`changed=True`\ . 
- If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, returns failure with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, returns failure with \ :emphasis:`changed=False`\ . - If *state=uncataloged* and the data set is not found, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=uncataloged`\ and the data set is not found, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=uncataloged* and the data set is found, the data set is uncataloged, module completes successfully with *changed=True*. + If \ :emphasis:`state=uncataloged`\ and the data set is found, the data set is uncataloged, module completes successfully with \ :emphasis:`changed=True`\ . | **required**: False @@ -93,22 +93,22 @@ state type - The data set type to be used when creating a data set. (e.g ``pdse``) + The data set type to be used when creating a data set. (e.g \ :literal:`pdse`\ ). - ``MEMBER`` expects to be used with an existing partitioned data set. + \ :literal:`member`\ expects to be used with an existing partitioned data set. Choices are case-sensitive. | **required**: False | **type**: str - | **default**: PDS - | **choices**: KSDS, ESDS, RRDS, LDS, SEQ, PDS, PDSE, LIBRARY, BASIC, LARGE, MEMBER, HFS, ZFS + | **default**: pds + | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs space_primary The amount of primary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . 
| **required**: False | **type**: int @@ -118,7 +118,7 @@ space_primary space_secondary The amount of secondary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -128,25 +128,25 @@ space_secondary space_type The unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **default**: M - | **choices**: K, M, G, CYL, TRK + | **default**: m + | **choices**: k, m, g, cyl, trk record_format - The format of the data set. (e.g ``FB``) + The format of the data set. (e.g \ :literal:`FB`\ ) Choices are case-sensitive. - When *type=KSDS*, *type=ESDS*, *type=RRDS*, *type=LDS* or *type=ZFS* then *record_format=None*, these types do not have a default *record_format*. + When \ :emphasis:`type=ksds`\ , \ :emphasis:`type=esds`\ , \ :emphasis:`type=rrds`\ , \ :emphasis:`type=lds`\ or \ :emphasis:`type=zfs`\ then \ :emphasis:`record\_format=None`\ , these types do not have a default \ :emphasis:`record\_format`\ . | **required**: False | **type**: str - | **default**: FB - | **choices**: FB, VB, FBA, VBA, U, F + | **default**: fb + | **choices**: fb, vb, fba, vba, u, f sms_storage_class @@ -216,9 +216,9 @@ directory_blocks key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -227,28 +227,28 @@ key_offset key_length The key length to use when creating a KSDS data set. 
- *key_length* is required when *type=KSDS*. + \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int volumes - If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. + If cataloging a data set, \ :emphasis:`volumes`\ specifies the name of the volume(s) where the data set is located. - If creating a data set, *volumes* specifies the volume(s) where the data set should be created. + If creating a data set, \ :emphasis:`volumes`\ specifies the volume(s) where the data set should be created. - If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=present`\ , and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. - If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=absent`\ and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - *volumes* is required when *state=cataloged*. 
+ \ :emphasis:`volumes`\ is required when \ :emphasis:`state=cataloged`\ . Accepts a string when using a single volume and a list of strings when using multiple. @@ -257,12 +257,12 @@ volumes replace - When *replace=True*, and *state=present*, existing data set matching *name* will be replaced. + When \ :emphasis:`replace=True`\ , and \ :emphasis:`state=present`\ , existing data set matching \ :emphasis:`name`\ will be replaced. Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - If *replace=True*, all data in the original data set will be lost. + If \ :emphasis:`replace=True`\ , all data in the original data set will be lost. | **required**: False | **type**: bool @@ -272,7 +272,7 @@ replace tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -283,9 +283,9 @@ force This is helpful when a data set is being used in a long running process such as a started task and you are wanting to delete a member. - The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :emphasis:`force=True`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . - The *force=True* only applies to data set members when *state=absent* and *type=MEMBER*. + The \ :emphasis:`force=True`\ only applies to data set members when \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ . 
| **required**: False | **type**: bool @@ -301,11 +301,11 @@ batch name - The name of the data set being managed. (e.g ``USER.TEST``) + The name of the data set being managed. (e.g \ :literal:`USER.TEST`\ ) - If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. + If \ :emphasis:`name`\ is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - Required if *type=MEMBER* or *state!=present* + Required if \ :emphasis:`type=member`\ or \ :emphasis:`state!=present`\ | **required**: False | **type**: str @@ -314,49 +314,49 @@ batch state The final state desired for specified data set. - If *state=absent* and the data set does not exist on the managed node, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and the data set does not exist on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and the data set does exist on the managed node, remove the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and the data set does exist on the managed node, remove the data set, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=absent* and *type=MEMBER* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. + If \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ and \ :emphasis:`force=True`\ , the data set will be opened with \ :emphasis:`DISP=SHR`\ such that the entire data set can be accessed by other processes while the specified member is deleted. - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. 
If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with *changed=True*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with \ :emphasis:`changed=True`\ . - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If they volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with *changed=False*. + If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided \ :emphasis:`volumes`\ . 
If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=True* and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with *changed=True*. + If \ :emphasis:`state=present`\ and \ :emphasis:`replace=True`\ and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=present* and *replace=False* and the data set is present on the managed node, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=present`\ and \ :emphasis:`replace=False`\ and the data set is present on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=present* and *type=MEMBER* and the member does not exist in the data set, create a member formatted to store data, module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable.
+ If \ :emphasis:`state=present`\ and \ :emphasis:`type=member`\ and the member does not exist in the data set, create a member formatted to store data, module completes successfully with \ :emphasis:`changed=True`\ . Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. - If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is already cataloged, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, module completes successfully with *changed=True*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, module completes successfully with \ :emphasis:`changed=True`\ . - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, returns failure with *changed=False*. + If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, returns failure with \ :emphasis:`changed=False`\ . 
- If *state=uncataloged* and the data set is not found, no action taken, module completes successfully with *changed=False*. + If \ :emphasis:`state=uncataloged`\ and the data set is not found, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . - If *state=uncataloged* and the data set is found, the data set is uncataloged, module completes successfully with *changed=True*. + If \ :emphasis:`state=uncataloged`\ and the data set is found, the data set is uncataloged, module completes successfully with \ :emphasis:`changed=True`\ . | **required**: False @@ -366,22 +366,22 @@ batch type - The data set type to be used when creating a data set. (e.g ``PDSE``) + The data set type to be used when creating a data set. (e.g \ :literal:`pdse`\ ) - ``MEMBER`` expects to be used with an existing partitioned data set. + \ :literal:`member`\ expects to be used with an existing partitioned data set. Choices are case-sensitive. | **required**: False | **type**: str - | **default**: PDS - | **choices**: KSDS, ESDS, RRDS, LDS, SEQ, PDS, PDSE, LIBRARY, BASIC, LARGE, MEMBER, HFS, ZFS + | **default**: pds + | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs space_primary The amount of primary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -391,7 +391,7 @@ batch space_secondary The amount of secondary space to allocate for the dataset. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int @@ -401,25 +401,25 @@ batch space_type The unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. 
+ Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **default**: M - | **choices**: K, M, G, CYL, TRK + | **default**: m + | **choices**: k, m, g, cyl, trk record_format - The format of the data set. (e.g ``FB``) + The format of the data set. (e.g \ :literal:`FB`\ ) Choices are case-sensitive. - When *type=KSDS*, *type=ESDS*, *type=RRDS*, *type=LDS* or *type=ZFS* then *record_format=None*, these types do not have a default *record_format*. + When \ :emphasis:`type=ksds`\ , \ :emphasis:`type=esds`\ , \ :emphasis:`type=rrds`\ , \ :emphasis:`type=lds`\ or \ :emphasis:`type=zfs`\ then \ :emphasis:`record\_format=None`\ , these types do not have a default \ :emphasis:`record\_format`\ . | **required**: False | **type**: str - | **default**: FB - | **choices**: FB, VB, FBA, VBA, U, F + | **default**: fb + | **choices**: fb, vb, fba, vba, u, f sms_storage_class @@ -489,9 +489,9 @@ batch key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -500,28 +500,28 @@ batch key_length The key length to use when creating a KSDS data set. - *key_length* is required when *type=KSDS*. + \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int volumes - If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. + If cataloging a data set, \ :emphasis:`volumes`\ specifies the name of the volume(s) where the data set is located. 
- If creating a data set, *volumes* specifies the volume(s) where the data set should be created. + If creating a data set, \ :emphasis:`volumes`\ specifies the volume(s) where the data set should be created. - If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=present`\ , and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. - If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. + If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=absent`\ and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - *volumes* is required when *state=cataloged*. + \ :emphasis:`volumes`\ is required when \ :emphasis:`state=cataloged`\ . Accepts a string when using a single volume and a list of strings when using multiple. @@ -530,12 +530,12 @@ batch replace - When *replace=True*, and *state=present*, existing data set matching *name* will be replaced. + When \ :emphasis:`replace=True`\ , and \ :emphasis:`state=present`\ , existing data set matching \ :emphasis:`name`\ will be replaced. Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. 
Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - If *replace=True*, all data in the original data set will be lost. + If \ :emphasis:`replace=True`\ , all data in the original data set will be lost. | **required**: False | **type**: bool @@ -547,9 +547,9 @@ batch This is helpful when a data set is being used in a long running process such as a started task and you are wanting to delete a member. - The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :emphasis:`force=True`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . - The *force=True* only applies to data set members when *state=absent* and *type=MEMBER*. + The \ :emphasis:`force=True`\ only applies to data set members when \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ . | **required**: False | **type**: bool @@ -568,7 +568,7 @@ Examples - name: Create a sequential data set if it does not exist zos_data_set: name: someds.name.here - type: SEQ + type: seq state: present - name: Create a PDS data set if it does not exist @@ -576,27 +576,27 @@ Examples name: someds.name.here type: pds space_primary: 5 - space_type: M - record_format: FBA + space_type: m + record_format: fba record_length: 25 - name: Attempt to replace a data set if it exists zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 replace: yes - name: Attempt to replace a data set if it exists. If not found in the catalog, check if it is available on volume 222222, and catalog if found. 
zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 volumes: "222222" replace: yes @@ -604,19 +604,19 @@ Examples - name: Create an ESDS data set if it does not exist zos_data_set: name: someds.name.here - type: ESDS + type: esds - name: Create a KSDS data set if it does not exist zos_data_set: name: someds.name.here - type: KSDS + type: ksds key_length: 8 key_offset: 0 - name: Create an RRDS data set with storage class MYDATA if it does not exist zos_data_set: name: someds.name.here - type: RRDS + type: rrds sms_storage_class: mydata - name: Delete a data set if it exists @@ -633,43 +633,43 @@ Examples - name: Write a member to an existing PDS; replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member replace: yes - name: Write a member to an existing PDS; do not replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member - name: Remove a member from an existing PDS zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member - name: Remove a member from an existing PDS/E by opening with disposition DISP=SHR zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member force: yes - name: Create multiple partitioned data sets and add one or more members to each zos_data_set: batch: - name: someds.name.here1 - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: FB + space_type: m + record_format: fb replace: yes - name: someds.name.here1(member1) - type: MEMBER + type: member - name: someds.name.here2(member1) - type: MEMBER + type: member replace: yes - name: someds.name.here2(member2) - type: MEMBER + type: member - name: Catalog a data set present on volume 222222 if it is uncataloged. 
zos_data_set: diff --git a/docs/source/modules/zos_encode.rst b/docs/source/modules/zos_encode.rst index 4c2294e24..68089a3a6 100644 --- a/docs/source/modules/zos_encode.rst +++ b/docs/source/modules/zos_encode.rst @@ -37,7 +37,7 @@ encoding from - The character set of the source *src*. + The character set of the source \ :emphasis:`src`\ . | **required**: False | **type**: str @@ -45,7 +45,7 @@ encoding to - The destination *dest* character set for the output to be written as. + The destination \ :emphasis:`dest`\ character set for the output to be written as. | **required**: False | **type**: str @@ -58,7 +58,7 @@ src The USS path or file must be an absolute pathname. - If *src* is a USS directory, all files will be encoded. + If \ :emphasis:`src`\ is a USS directory, all files will be encoded. | **required**: True | **type**: str @@ -67,11 +67,11 @@ src dest The location where the converted characters are output. - The destination *dest* can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, or KSDS (VSAM data set). + The destination \ :emphasis:`dest`\ can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, or KSDS (VSAM data set). - If the length of the PDSE member name used in *dest* is greater than 8 characters, the member name will be truncated when written out. + If the length of the PDSE member name used in \ :emphasis:`dest`\ is greater than 8 characters, the member name will be truncated when written out. - If *dest* is not specified, the *src* will be used as the destination and will overwrite the *src* with the character set in the option *to_encoding*. + If \ :emphasis:`dest`\ is not specified, the \ :emphasis:`src`\ will be used as the destination and will overwrite the \ :emphasis:`src`\ with the character set in the option \ :emphasis:`to\_encoding`\ . The USS file or path must be an absolute pathname. 
@@ -80,9 +80,9 @@ dest backup - Creates a backup file or backup data set for *dest*, including the timestamp information to ensure that you retrieve the original file. + Creates a backup file or backup data set for \ :emphasis:`dest`\ , including the timestamp information to ensure that you retrieve the original file. - *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . | **required**: False | **type**: bool @@ -92,13 +92,13 @@ backup backup_name Specify the USS file name or data set name for the dest backup. - If dest is a USS file or path, *backup_name* must be a file or path name, and the USS path or file must be an absolute pathname. + If dest is a USS file or path, \ :emphasis:`backup\_name`\ must be a file or path name, and the USS path or file must be an absolute pathname. - If dest is an MVS data set, the *backup_name* must be an MVS data set name. + If dest is an MVS data set, the \ :emphasis:`backup\_name`\ must be an MVS data set name. - If *backup_name* is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. + If \ :emphasis:`backup\_name`\ is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file\_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. - ``backup_name`` will be returned on either success or failure of module execution such that data can be retrieved. 
+ \ :literal:`backup\_name`\ will be returned on either success or failure of module execution such that data can be retrieved. | **required**: False | **type**: str @@ -107,7 +107,7 @@ backup_name backup_compress Determines if backups to USS files or paths should be compressed. - *backup_compress* is only used when *backup=true*. + \ :emphasis:`backup\_compress`\ is only used when \ :emphasis:`backup=true`\ . | **required**: False | **type**: bool @@ -117,7 +117,7 @@ backup_compress tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -265,7 +265,7 @@ Notes All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . @@ -278,7 +278,7 @@ Return Values src - The location of the input characters identified in option *src*. + The location of the input characters identified in option \ :emphasis:`src`\ . | **returned**: always | **type**: str diff --git a/docs/source/modules/zos_fetch.rst b/docs/source/modules/zos_fetch.rst index 87a50a65a..7cdcabbd5 100644 --- a/docs/source/modules/zos_fetch.rst +++ b/docs/source/modules/zos_fetch.rst @@ -20,7 +20,7 @@ Synopsis - When fetching a sequential data set, the destination file name will be the same as the data set name. - When fetching a PDS or PDSE, the destination will be a directory with the same name as the PDS or PDSE. - When fetching a PDS/PDSE member, destination will be a file. 
-- Files that already exist at ``dest`` will be overwritten if they are different than ``src``. +- Files that already exist at \ :literal:`dest`\ will be overwritten if they are different than \ :literal:`src`\ . @@ -96,7 +96,7 @@ encoding from - The character set of the source *src*. + The character set of the source \ :emphasis:`src`\ . Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -105,7 +105,7 @@ encoding to - The destination *dest* character set for the output to be written as. + The destination \ :emphasis:`dest`\ character set for the output to be written as. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -117,16 +117,16 @@ encoding tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str ignore_sftp_stderr - During data transfer through sftp, the module fails if the sftp command directs any content to stderr. The user is able to override this behavior by setting this parameter to ``true``. By doing so, the module would essentially ignore the stderr stream produced by sftp and continue execution. + During data transfer through sftp, the module fails if the sftp command directs any content to stderr. The user is able to override this behavior by setting this parameter to \ :literal:`true`\ . By doing so, the module would essentially ignore the stderr stream produced by sftp and continue execution. 
- When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. + When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using \ :strong:`-vvvv`\ or through environment variables such as \ :strong:`verbosity = 4`\ , then this parameter will automatically be set to \ :literal:`true`\ . | **required**: False | **type**: bool @@ -196,13 +196,13 @@ Notes .. note:: When fetching PDSE and VSAM data sets, temporary storage will be used on the remote z/OS system. After the PDSE or VSAM data set is successfully transferred, the temporary storage will be deleted. The size of the temporary storage will correspond to the size of PDSE or VSAM data set being fetched. If module execution fails, the temporary storage will be deleted. - To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the ``checksum`` parameter. + To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the \ :literal:`checksum`\ parameter. All data sets are always assumed to be cataloged. If an uncataloged data set needs to be fetched, it should be cataloged first. Fetching HFS or ZFS type data sets is currently not supported. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. 
In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. @@ -263,7 +263,7 @@ data_set_type | **sample**: PDSE note - Notice of module failure when ``fail_on_missing`` is false. + Notice of module failure when \ :literal:`fail\_on\_missing`\ is false. | **returned**: failure and fail_on_missing=false | **type**: str diff --git a/docs/source/modules/zos_find.rst b/docs/source/modules/zos_find.rst index f195b2c2c..83082b5c0 100644 --- a/docs/source/modules/zos_find.rst +++ b/docs/source/modules/zos_find.rst @@ -18,7 +18,7 @@ Synopsis -------- - Return a list of data sets based on specific criteria. - Multiple criteria can be added (AND'd) together. -- The ``zos_find`` module can only find MVS data sets. Use the `find `_ module to find USS files. +- The \ :literal:`zos\_find`\ module can only find MVS data sets. Use the \ `find `__\ module to find USS files. @@ -44,9 +44,9 @@ age age_stamp Choose the age property against which to compare age. - ``creation_date`` is the date the data set was created and ``ref_date`` is the date the data set was last referenced. + \ :literal:`creation\_date`\ is the date the data set was created and \ :literal:`ref\_date`\ is the date the data set was last referenced. - ``ref_date`` is only applicable to sequential and partitioned data sets. + \ :literal:`ref\_date`\ is only applicable to sequential and partitioned data sets. | **required**: False | **type**: str @@ -80,7 +80,7 @@ patterns This parameter expects a list, which can be either comma separated or YAML. - If ``pds_patterns`` is provided, ``patterns`` must be member patterns. + If \ :literal:`pds\_patterns`\ is provided, \ :literal:`patterns`\ must be member patterns. When searching for members within a PDS/PDSE, pattern can be a regular expression. 
@@ -107,7 +107,7 @@ pds_patterns Required when searching for data set members. - Valid only for ``nonvsam`` resource types. Otherwise ignored. + Valid only for \ :literal:`nonvsam`\ resource types. Otherwise ignored. | **required**: False | **type**: list @@ -117,9 +117,9 @@ pds_patterns resource_type The type of resource to search. - ``nonvsam`` refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF. + \ :literal:`nonvsam`\ refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF. - ``cluster`` refers to a VSAM cluster. The ``data`` and ``index`` are the data and index components of a VSAM cluster. + \ :literal:`cluster`\ refers to a VSAM cluster. The \ :literal:`data`\ and \ :literal:`index`\ are the data and index components of a VSAM cluster. | **required**: False | **type**: str @@ -192,11 +192,11 @@ Notes ----- .. note:: - Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets. + Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The \ `zos\_data\_set <./zos_data_set.html>`__\ module can be used to catalog uncataloged data sets. - The `zos_find <./zos_find.html>`_ module currently does not support wildcards for high level qualifiers. For example, ``SOME.*.DATA.SET`` is a valid pattern, but ``*.DATA.SET`` is not. + The \ `zos\_find <./zos_find.html>`__\ module currently does not support wildcards for high level qualifiers. For example, \ :literal:`SOME.\*.DATA.SET`\ is a valid pattern, but \ :literal:`\*.DATA.SET`\ is not. - If a data set pattern is specified as ``USER.*``, the matching data sets will have two name segments such as ``USER.ABC``, ``USER.XYZ`` etc. 
If a wildcard is specified as ``USER.*.ABC``, the matching data sets will have three name segments such as ``USER.XYZ.ABC``, ``USER.TEST.ABC`` etc. + If a data set pattern is specified as \ :literal:`USER.\*`\ , the matching data sets will have two name segments such as \ :literal:`USER.ABC`\ , \ :literal:`USER.XYZ`\ etc. If a wildcard is specified as \ :literal:`USER.\*.ABC`\ , the matching data sets will have three name segments such as \ :literal:`USER.XYZ.ABC`\ , \ :literal:`USER.TEST.ABC`\ etc. The time taken to execute the module is proportional to the number of data sets present on the system and how large the data sets are. diff --git a/docs/source/modules/zos_gather_facts.rst b/docs/source/modules/zos_gather_facts.rst index 0247ffd96..02a56fd23 100644 --- a/docs/source/modules/zos_gather_facts.rst +++ b/docs/source/modules/zos_gather_facts.rst @@ -17,8 +17,8 @@ zos_gather_facts -- Gather z/OS system facts. Synopsis -------- - Retrieve variables from target z/OS systems. -- Variables are added to the *ansible_facts* dictionary, available to playbooks. -- Apply filters on the *gather_subset* list to reduce the variables that are added to the *ansible_facts* dictionary. +- Variables are added to the \ :emphasis:`ansible\_facts`\ dictionary, available to playbooks. +- Apply filters on the \ :emphasis:`gather\_subset`\ list to reduce the variables that are added to the \ :emphasis:`ansible\_facts`\ dictionary. - Note, the module will fail fast if any unsupported options are provided. This is done to raise awareness of a failure in an automation setting. @@ -32,7 +32,7 @@ Parameters gather_subset If specified, it will collect facts that come under the specified subset (eg. ipl will return ipl facts). Specifying subsets is recommended to reduce time in gathering facts when the facts needed are in a specific subset. - The following subsets are available ``ipl``, ``cpu``, ``sys``, and ``iodf``. Depending on the version of ZOAU, additional subsets may be available. 
+ The following subsets are available \ :literal:`ipl`\ , \ :literal:`cpu`\ , \ :literal:`sys`\ , and \ :literal:`iodf`\ . Depending on the version of ZOAU, additional subsets may be available. | **required**: False | **type**: list @@ -41,13 +41,13 @@ gather_subset filter - Filter out facts from the *ansible_facts* dictionary. + Filter out facts from the \ :emphasis:`ansible\_facts`\ dictionary. - Uses shell-style `fnmatch `_ pattern matching to filter out the collected facts. + Uses shell-style \ `fnmatch `__\ pattern matching to filter out the collected facts. - An empty list means 'no filter', same as providing '*'. + An empty list means 'no filter', same as providing '\*'. - Filtering is performed after the facts are gathered such that no compute is saved when filtering. Filtering only reduces the number of variables that are added to the *ansible_facts* dictionary. To restrict the facts that are collected, refer to the *gather_subset* parameter. + Filtering is performed after the facts are gathered such that no compute is saved when filtering. Filtering only reduces the number of variables that are added to the \ :emphasis:`ansible\_facts`\ dictionary. To restrict the facts that are collected, refer to the \ :emphasis:`gather\_subset`\ parameter. | **required**: False | **type**: list diff --git a/docs/source/modules/zos_job_output.rst b/docs/source/modules/zos_job_output.rst index efea6ea2a..59e37aeb9 100644 --- a/docs/source/modules/zos_job_output.rst +++ b/docs/source/modules/zos_job_output.rst @@ -18,9 +18,9 @@ Synopsis -------- - Display the z/OS job output for a given criteria (Job id/Job name/owner) with/without a data definition name as a filter. - At least provide a job id/job name/owner. -- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC*" or "*". -- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP*" or "*". 
-- The owner can be specific such as "IBMUSER", or one that uses a pattern like "*". +- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC\*" or "\*". +- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP\*" or "\*". +- The owner can be specific such as "IBMUSER", or one that uses a pattern like "\*". - If there is no ddname, or if ddname="?", output of all the ddnames under the given job will be displayed. @@ -32,21 +32,21 @@ Parameters job_id - The z/OS job ID of the job containing the spool file. (e.g "STC02560", "STC*") + The z/OS job ID of the job containing the spool file. (e.g "STC02560", "STC\*") | **required**: False | **type**: str job_name - The name of the batch job. (e.g "TCPIP", "C*") + The name of the batch job. (e.g "TCPIP", "C\*") | **required**: False | **type**: str owner - The owner who ran the job. (e.g "IBMUSER", "*") + The owner who ran the job. (e.g "IBMUSER", "\*") | **required**: False | **type**: str @@ -97,7 +97,7 @@ Return Values jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret_code dictionary with parameter msg_txt = The job could not be found. + The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret\_code dictionary with parameter msg\_txt = The job could not be found. | **returned**: success | **type**: list @@ -416,7 +416,7 @@ jobs | **sample**: CC 0000 msg_code - Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". + Return code extracted from the \`msg\` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". 
| **type**: str | **sample**: S0C4 diff --git a/docs/source/modules/zos_job_query.rst b/docs/source/modules/zos_job_query.rst index ea320dfc3..e4da71341 100644 --- a/docs/source/modules/zos_job_query.rst +++ b/docs/source/modules/zos_job_query.rst @@ -17,8 +17,8 @@ zos_job_query -- Query job status Synopsis -------- - List z/OS job(s) and the current status of the job(s). -- Uses job_name to filter the jobs by the job name. -- Uses job_id to filter the jobs by the job identifier. +- Uses job\_name to filter the jobs by the job name. +- Uses job\_id to filter the jobs by the job identifier. - Uses owner to filter the jobs by the job owner. - Uses system to filter the jobs by system where the job is running (or ran) on. @@ -35,9 +35,9 @@ job_name A job name can be up to 8 characters long. - The *job_name* can contain include multiple wildcards. + The \ :emphasis:`job\_name`\ can contain include multiple wildcards. - The asterisk (`*`) wildcard will match zero or more specified characters. + The asterisk (\`\*\`) wildcard will match zero or more specified characters. | **required**: False | **type**: str @@ -56,13 +56,13 @@ owner job_id The job id that has been assigned to the job. - A job id must begin with `STC`, `JOB`, `TSU` and are followed by up to 5 digits. + A job id must begin with \`STC\`, \`JOB\`, \`TSU\` and are followed by up to 5 digits. - When a job id is greater than 99,999, the job id format will begin with `S`, `J`, `T` and are followed by 7 digits. + When a job id is greater than 99,999, the job id format will begin with \`S\`, \`J\`, \`T\` and are followed by 7 digits. - The *job_id* can contain include multiple wildcards. + The \ :emphasis:`job\_id`\ can contain include multiple wildcards. - The asterisk (`*`) wildcard will match zero or more specified characters. + The asterisk (\`\*\`) wildcard will match zero or more specified characters. 
| **required**: False | **type**: str @@ -122,7 +122,7 @@ changed | **type**: bool jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret_code dictionary with parameter msg_txt = The job could not be found. + The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret\_code dictionary with parameter msg\_txt = The job could not be found. | **returned**: success | **type**: list @@ -211,7 +211,7 @@ jobs | **sample**: CC 0000 msg_code - Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". + Return code extracted from the \`msg\` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". | **type**: str | **sample**: S0C4 diff --git a/docs/source/modules/zos_job_submit.rst b/docs/source/modules/zos_job_submit.rst index 8f4dda61b..964ab8f4b 100644 --- a/docs/source/modules/zos_job_submit.rst +++ b/docs/source/modules/zos_job_submit.rst @@ -42,24 +42,24 @@ src location - The JCL location. Supported choices are ``DATA_SET``, ``USS`` or ``LOCAL``. + The JCL location. Supported choices are \ :literal:`data\_set`\ , \ :literal:`uss`\ or \ :literal:`local`\ . - DATA_SET can be a PDS, PDSE, or sequential data set. + \ :literal:`data\_set`\ can be a PDS, PDSE, or sequential data set. - USS means the JCL location is located in UNIX System Services (USS). + \ :literal:`uss`\ means the JCL location is located in UNIX System Services (USS). - LOCAL means locally to the ansible control node. + \ :literal:`local`\ means locally to the ansible control node. | **required**: False | **type**: str - | **default**: DATA_SET - | **choices**: DATA_SET, USS, LOCAL + | **default**: data_set + | **choices**: data_set, uss, local wait_time_s - Option *wait_time_s* is the total time that module `zos_job_submit <./zos_job_submit.html>`_ will wait for a submitted job to complete. 
The time begins when the module is executed on the managed node. + Option \ :emphasis:`wait\_time\_s`\ is the total time that module \ `zos\_job\_submit <./zos_job_submit.html>`__\ will wait for a submitted job to complete. The time begins when the module is executed on the managed node. - *wait_time_s* is measured in seconds and must be a value greater than 0 and less than 86400. + \ :emphasis:`wait\_time\_s`\ is measured in seconds and must be a value greater than 0 and less than 86400. | **required**: False | **type**: int @@ -84,11 +84,11 @@ return_output volume - The volume serial (VOLSER)is where the data set resides. The option is required only when the data set is not cataloged on the system. + The volume serial (VOLSER) is where the data set resides. The option is required only when the data set is not cataloged on the system. - When configured, the `zos_job_submit <./zos_job_submit.html>`_ will try to catalog the data set for the volume serial. If it is not able to, the module will fail. + When configured, the \ `zos\_job\_submit <./zos_job_submit.html>`__\ will try to catalog the data set for the volume serial. If it is not able to, the module will fail. - Ignored for *location=USS* and *location=LOCAL*. + Ignored for \ :emphasis:`location=uss`\ and \ :emphasis:`location=local`\ . | **required**: False | **type**: str @@ -97,7 +97,7 @@ volume encoding Specifies which encoding the local JCL file should be converted from and to, before submitting the job. - This option is only supported for when *location=LOCAL*. + This option is only supported for when \ :emphasis:`location=local`\ . If this parameter is not provided, and the z/OS systems default encoding can not be identified, the JCL file will be converted from UTF-8 to IBM-1047 by default, otherwise the module will detect the z/OS system encoding. 
@@ -129,13 +129,13 @@ encoding use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when ``src`` is a local file or directory. + Only valid when \ :literal:`src`\ is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ | **required**: False | **type**: bool @@ -145,9 +145,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . - These options are ignored unless ``use_template`` is true. + These options are ignored unless \ :literal:`use\_template`\ is true. | **required**: False | **type**: dict @@ -226,7 +226,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block is removed. 
- Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. | **required**: False | **type**: bool @@ -267,22 +267,22 @@ Examples .. code-block:: yaml+jinja - - name: Submit JCL in a PDSE member + - name: Submit JCL in a PDSE member. zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set register: response - name: Submit JCL in USS with no DDs in the output. zos_job_submit: src: /u/tester/demo/sample.jcl - location: USS + location: uss return_output: false - name: Convert local JCL to IBM-037 and submit the job. zos_job_submit: src: /Users/maxy/ansible-playbooks/provision/sample.jcl - location: LOCAL + location: local encoding: from: ISO8859-1 to: IBM-037 @@ -290,25 +290,25 @@ Examples - name: Submit JCL in an uncataloged PDSE on volume P2SS01. zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set volume: P2SS01 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit JCL and set the max return code the module should fail on to 16. zos_job_submit: src: HLQ.DATA.LLQ - location: DATA_SET + location: data_set max_rc: 16 @@ -318,9 +318,9 @@ Notes ----- .. note:: - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . 
- This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. @@ -333,7 +333,7 @@ Return Values jobs - List of jobs output. If no job status is found, this will return an empty ret_code with msg_txt explanation. + List of jobs output. If no job status is found, this will return an empty ret\_code with msg\_txt explanation. | **returned**: success | **type**: list @@ -680,25 +680,27 @@ jobs msg Job status resulting from the job submission. - Job status `ABEND` indicates the job ended abnormally. + Job status \`ABEND\` indicates the job ended abnormally. - Job status `AC` indicates the job is active, often a started task or job taking long. + Job status \`AC\` indicates the job is active, often a started task or job taking long. - Job status `CAB` indicates a converter abend. + Job status \`CAB\` indicates a converter abend. - Job status `CANCELED` indicates the job was canceled. + Job status \`CANCELED\` indicates the job was canceled. - Job status `CNV` indicates a converter error. + Job status \`CNV\` indicates a converter error. - Job status `FLU` indicates the job was flushed. 
+ Job status \`FLU\` indicates the job was flushed. - Job status `JCLERR` or `JCL ERROR` indicates the JCL has an error. + Job status \`JCLERR\` or \`JCL ERROR\` indicates the JCL has an error. - Job status `SEC` or `SEC ERROR` indicates the job as encountered a security error. + Job status \`SEC\` or \`SEC ERROR\` indicates the job as encountered a security error. - Job status `SYS` indicates a system failure. + Job status \`SYS\` indicates a system failure. - Job status `?` indicates status can not be determined. + Job status \`?\` indicates status can not be determined. + + Jobs where status can not be determined will result in None (NULL). | **type**: str | **sample**: AC @@ -706,18 +708,22 @@ jobs msg_code The return code from the submitted job as a string. + Jobs which have no return code will result in None (NULL), such is the case of a job that errors or is active. + | **type**: str msg_txt Returns additional information related to the submitted job. + Jobs which have no additional information will result in None (NULL). + | **type**: str | **sample**: The job JOB00551 was run with special job processing TYPRUN=SCAN. This will result in no completion, return code or job steps and changed will be false. code The return code converted to an integer value when available. - Jobs which have no return code will return NULL, such is the case of a job that errors or is active. + Jobs which have no return code will result in None (NULL), such is the case of a job that errors or is active. | **type**: int @@ -788,10 +794,3 @@ jobs | **sample**: IEBGENER -message - This option is being deprecated - - | **returned**: success - | **type**: str - | **sample**: Submit JCL operation succeeded. 
- diff --git a/docs/source/modules/zos_lineinfile.rst b/docs/source/modules/zos_lineinfile.rst index f7005017e..983e5ca0b 100644 --- a/docs/source/modules/zos_lineinfile.rst +++ b/docs/source/modules/zos_lineinfile.rst @@ -40,13 +40,13 @@ src regexp The regular expression to look for in every line of the USS file or data set. - For ``state=present``, the pattern to replace if found. Only the last line found will be replaced. + For \ :literal:`state=present`\ , the pattern to replace if found. Only the last line found will be replaced. - For ``state=absent``, the pattern of the line(s) to remove. + For \ :literal:`state=absent`\ , the pattern of the line(s) to remove. - If the regular expression is not matched, the line will be added to the USS file or data set in keeping with ``insertbefore`` or ``insertafter`` settings. + If the regular expression is not matched, the line will be added to the USS file or data set in keeping with \ :literal:`insertbefore`\ or \ :literal:`insertafter`\ settings. - When modifying a line the regexp should typically match both the initial state of the line as well as its state after replacement by ``line`` to ensure idempotence. + When modifying a line the regexp should typically match both the initial state of the line as well as its state after replacement by \ :literal:`line`\ to ensure idempotence. | **required**: False | **type**: str @@ -64,22 +64,22 @@ state line The line to insert/replace into the USS file or data set. - Required for ``state=present``. + Required for \ :literal:`state=present`\ . - If ``backrefs`` is set, may contain backreferences that will get expanded with the ``regexp`` capture groups if the regexp matches. + If \ :literal:`backrefs`\ is set, may contain backreferences that will get expanded with the \ :literal:`regexp`\ capture groups if the regexp matches. | **required**: False | **type**: str backrefs - Used with ``state=present``. + Used with \ :literal:`state=present`\ . 
- If set, ``line`` can contain backreferences (both positional and named) that will get populated if the ``regexp`` matches. + If set, \ :literal:`line`\ can contain backreferences (both positional and named) that will get populated if the \ :literal:`regexp`\ matches. - This parameter changes the operation of the module slightly; ``insertbefore`` and ``insertafter`` will be ignored, and if the ``regexp`` does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged. + This parameter changes the operation of the module slightly; \ :literal:`insertbefore`\ and \ :literal:`insertafter`\ will be ignored, and if the \ :literal:`regexp`\ does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged. - If the ``regexp`` does match, the last matching line will be replaced by the expanded line parameter. + If the \ :literal:`regexp`\ does match, the last matching line will be replaced by the expanded line parameter. | **required**: False | **type**: bool @@ -87,23 +87,23 @@ backrefs insertafter - Used with ``state=present``. + Used with \ :literal:`state=present`\ . If specified, the line will be inserted after the last match of specified regular expression. If the first match is required, use(firstmatch=yes). - A special value is available; ``EOF`` for inserting the line at the end of the USS file or data set. + A special value is available; \ :literal:`EOF`\ for inserting the line at the end of the USS file or data set. If the specified regular expression has no matches, EOF will be used instead. - If ``insertbefore`` is set, default value ``EOF`` will be ignored. + If \ :literal:`insertbefore`\ is set, default value \ :literal:`EOF`\ will be ignored. - If regular expressions are passed to both ``regexp`` and ``insertafter``, ``insertafter`` is only honored if no match for ``regexp`` is found. 
+ If regular expressions are passed to both \ :literal:`regexp`\ and \ :literal:`insertafter`\ , \ :literal:`insertafter`\ is only honored if no match for \ :literal:`regexp`\ is found. - May not be used with ``backrefs`` or ``insertbefore``. + May not be used with \ :literal:`backrefs`\ or \ :literal:`insertbefore`\ . - Choices are EOF or '*regex*' + Choices are EOF or '\*regex\*' Default is EOF @@ -112,30 +112,30 @@ insertafter insertbefore - Used with ``state=present``. + Used with \ :literal:`state=present`\ . If specified, the line will be inserted before the last match of specified regular expression. - If the first match is required, use ``firstmatch=yes``. + If the first match is required, use \ :literal:`firstmatch=yes`\ . - A value is available; ``BOF`` for inserting the line at the beginning of the USS file or data set. + A value is available; \ :literal:`BOF`\ for inserting the line at the beginning of the USS file or data set. If the specified regular expression has no matches, the line will be inserted at the end of the USS file or data set. - If regular expressions are passed to both ``regexp`` and ``insertbefore``, ``insertbefore`` is only honored if no match for ``regexp`` is found. + If regular expressions are passed to both \ :literal:`regexp`\ and \ :literal:`insertbefore`\ , \ :literal:`insertbefore`\ is only honored if no match for \ :literal:`regexp`\ is found. - May not be used with ``backrefs`` or ``insertafter``. + May not be used with \ :literal:`backrefs`\ or \ :literal:`insertafter`\ . - Choices are BOF or '*regex*' + Choices are BOF or '\*regex\*' | **required**: False | **type**: str backup - Creates a backup file or backup data set for *src*, including the timestamp information to ensure that you retrieve the original file. + Creates a backup file or backup data set for \ :emphasis:`src`\ , including the timestamp information to ensure that you retrieve the original file. 
- *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . The backup file name will be return on either success or failure of module execution such that data can be retrieved. @@ -147,11 +147,11 @@ backup backup_name Specify the USS file name or data set name for the destination backup. - If the source *src* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name. + If the source \ :emphasis:`src`\ is a USS file or path, the backup\_name must be a file or path name, and the USS file or path must be an absolute path name. - If the source is an MVS data set, the backup_name must be an MVS data set name. + If the source is an MVS data set, the backup\_name must be an MVS data set name. - If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. @@ -162,16 +162,16 @@ backup_name tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. 
| **required**: False | **type**: str firstmatch - Used with ``insertafter`` or ``insertbefore``. + Used with \ :literal:`insertafter`\ or \ :literal:`insertbefore`\ . - If set, ``insertafter`` and ``insertbefore`` will work with the first line that matches the given regular expression. + If set, \ :literal:`insertafter`\ and \ :literal:`insertbefore`\ will work with the first line that matches the given regular expression. | **required**: False | **type**: bool @@ -179,7 +179,7 @@ firstmatch encoding - The character set of the source *src*. `zos_lineinfile <./zos_lineinfile.html>`_ requires to be provided with correct encoding to read the content of USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. + The character set of the source \ :emphasis:`src`\ . \ `zos\_lineinfile <./zos_lineinfile.html>`__\ requires to be provided with correct encoding to read the content of USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -193,7 +193,7 @@ force This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read. - The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*. + The \ :literal:`force`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . | **required**: False | **type**: bool @@ -260,7 +260,7 @@ Notes All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . 
@@ -273,7 +273,7 @@ Return Values changed - Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`. + Indicates if the source was modified. Value of 1 represents \`true\`, otherwise \`false\`. | **returned**: success | **type**: bool diff --git a/docs/source/modules/zos_mount.rst b/docs/source/modules/zos_mount.rst index 42e8a8ea6..9a30c5c91 100644 --- a/docs/source/modules/zos_mount.rst +++ b/docs/source/modules/zos_mount.rst @@ -16,9 +16,9 @@ zos_mount -- Mount a z/OS file system. Synopsis -------- -- The module `zos_mount <./zos_mount.html>`_ can manage mount operations for a z/OS UNIX System Services (USS) file system data set. -- The *src* data set must be unique and a Fully Qualified Name (FQN). -- The *path* will be created if needed. +- The module \ `zos\_mount <./zos_mount.html>`__\ can manage mount operations for a z/OS UNIX System Services (USS) file system data set. +- The \ :emphasis:`src`\ data set must be unique and a Fully Qualified Name (FQN). +- The \ :emphasis:`path`\ will be created if needed. @@ -31,7 +31,7 @@ Parameters path The absolute path name onto which the file system is to be mounted. - The *path* is case sensitive and must be less than or equal 1023 characters long. + The \ :emphasis:`path`\ is case sensitive and must be less than or equal 1023 characters long. | **required**: True | **type**: str @@ -40,9 +40,9 @@ path src The name of the file system to be added to the file system hierarchy. - The file system *src* must be a data set of type *fs_type*. + The file system \ :emphasis:`src`\ must be a data set of type \ :emphasis:`fs\_type`\ . - The file system *src* data set must be cataloged. + The file system \ :emphasis:`src`\ data set must be cataloged. | **required**: True | **type**: str @@ -53,35 +53,35 @@ fs_type The physical file systems data set format to perform the logical mount. - The *fs_type* is required to be uppercase. + The \ :emphasis:`fs\_type`\ is required to be lowercase. 
| **required**: True | **type**: str - | **choices**: HFS, ZFS, NFS, TFS + | **choices**: hfs, zfs, nfs, tfs state The desired status of the described mount (choice). - If *state=mounted* and *src* are not in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will be updated, the device will be mounted and the module will complete successfully with *changed=True*. + If \ :emphasis:`state=mounted`\ and \ :emphasis:`src`\ are not in use, the module will add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The \ :emphasis:`path`\ will be updated, the device will be mounted and the module will complete successfully with \ :emphasis:`changed=True`\ . - If *state=mounted* and *src* are in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will not be updated, the device will not be mounted and the module will complete successfully with *changed=False*. + If \ :emphasis:`state=mounted`\ and \ :emphasis:`src`\ are in use, the module will add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The \ :emphasis:`path`\ will not be updated, the device will not be mounted and the module will complete successfully with \ :emphasis:`changed=False`\ . - If *state=unmounted* and *src* are in use, the module will **not** add the file system entry to the parmlib member *persistent/data_store*. The device will be unmounted and the module will complete successfully with *changed=True*. + If \ :emphasis:`state=unmounted`\ and \ :emphasis:`src`\ are in use, the module will \ :strong:`not`\ add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ . The device will be unmounted and the module will complete successfully with \ :emphasis:`changed=True`\ . 
- If *state=unmounted* and *src* are not in use, the module will **not** add the file system entry to parmlib member *persistent/data_store*.The device will remain unchanged and the module will complete with *changed=False*. + If \ :emphasis:`state=unmounted`\ and \ :emphasis:`src`\ are not in use, the module will \ :strong:`not`\ add the file system entry to parmlib member \ :emphasis:`persistent/data\_store`\ .The device will remain unchanged and the module will complete with \ :emphasis:`changed=False`\ . - If *state=present*, the module will add the file system entry to the provided parmlib member *persistent/data_store* if not present. The module will complete successfully with *changed=True*. + If \ :emphasis:`state=present`\ , the module will add the file system entry to the provided parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The module will complete successfully with \ :emphasis:`changed=True`\ . - If *state=absent*, the module will remove the file system entry to the provided parmlib member *persistent/data_store* if present. The module will complete successfully with *changed=True*. + If \ :emphasis:`state=absent`\ , the module will remove the file system entry to the provided parmlib member \ :emphasis:`persistent/data\_store`\ if present. The module will complete successfully with \ :emphasis:`changed=True`\ . - If *state=remounted*, the module will **not** add the file system entry to parmlib member *persistent/data_store*. The device will be unmounted and mounted, the module will complete successfully with *changed=True*. + If \ :emphasis:`state=remounted`\ , the module will \ :strong:`not`\ add the file system entry to parmlib member \ :emphasis:`persistent/data\_store`\ . The device will be unmounted and mounted, the module will complete successfully with \ :emphasis:`changed=True`\ . 
| **required**: False @@ -91,7 +91,7 @@ state persistent - Add or remove mount command entries to provided *data_store* + Add or remove mount command entries to provided \ :emphasis:`data\_store`\ | **required**: False | **type**: dict @@ -105,9 +105,9 @@ persistent backup - Creates a backup file or backup data set for *data_store*, including the timestamp information to ensure that you retrieve the original parameters defined in *data_store*. + Creates a backup file or backup data set for \ :emphasis:`data\_store`\ , including the timestamp information to ensure that you retrieve the original parameters defined in \ :emphasis:`data\_store`\ . - *backup_name* can be used to specify a backup file name if *backup=true*. + \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . The backup file name will be returned on either success or failure of module execution such that data can be retrieved. @@ -119,11 +119,11 @@ persistent backup_name Specify the USS file name or data set name for the destination backup. - If the source *data_store* is a USS file or path, the *backup_name* name can be relative or absolute for file or path name. + If the source \ :emphasis:`data\_store`\ is a USS file or path, the \ :emphasis:`backup\_name`\ name can be relative or absolute for file or path name. - If the source is an MVS data set, the backup_name must be an MVS data set name. + If the source is an MVS data set, the backup\_name must be an MVS data set name. - If the backup_name is not provided, the default *backup_name* will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + If the backup\_name is not provided, the default \ :emphasis:`backup\_name`\ will be used. 
If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. @@ -132,9 +132,9 @@ persistent comment - If provided, this is used as a comment that surrounds the command in the *persistent/data_store* + If provided, this is used as a comment that surrounds the command in the \ :emphasis:`persistent/data\_store`\ - Comments are used to encapsulate the *persistent/data_store* entry such that they can easily be understood and located. + Comments are used to encapsulate the \ :emphasis:`persistent/data\_store`\ entry such that they can easily be understood and located. | **required**: False | **type**: list @@ -145,29 +145,29 @@ persistent unmount_opts Describes how the unmount will be performed. - For more on coded character set identifiers, review the IBM documentation topic **UNMOUNT - Remove a file system from the file hierarchy**. + For more on coded character set identifiers, review the IBM documentation topic \ :strong:`UNMOUNT - Remove a file system from the file hierarchy`\ . | **required**: False | **type**: str - | **default**: NORMAL - | **choices**: DRAIN, FORCE, IMMEDIATE, NORMAL, REMOUNT, RESET + | **default**: normal + | **choices**: drain, force, immediate, normal, remount, reset mount_opts Options available to the mount. - If *mount_opts=RO* on a mounted/remount, mount is performed read-only. + If \ :emphasis:`mount\_opts=ro`\ on a mounted/remount, mount is performed read-only. - If *mount_opts=SAME* and (unmount_opts=REMOUNT), mount is opened in the same mode as previously opened. + If \ :emphasis:`mount\_opts=same`\ and (unmount\_opts=remount), mount is opened in the same mode as previously opened. 
- If *mount_opts=NOWAIT*, mount is performed asynchronously. + If \ :emphasis:`mount\_opts=nowait`\ , mount is performed asynchronously. - If *mount_opts=NOSECURITY*, security checks are not enforced for files in this file system. + If \ :emphasis:`mount\_opts=nosecurity`\ , security checks are not enforced for files in this file system. | **required**: False | **type**: str - | **default**: RW - | **choices**: RO, RW, SAME, NOWAIT, NOSECURITY + | **default**: rw + | **choices**: ro, rw, same, nowait, nosecurity src_params @@ -184,27 +184,27 @@ tag_untagged When the file system is unmounted, the tags are lost. - If *tag_untagged=NOTEXT* none of the untagged files in the file system are automatically converted during file reading and writing. + If \ :emphasis:`tag\_untagged=notext`\ none of the untagged files in the file system are automatically converted during file reading and writing. - If *tag_untagged=TEXT* each untagged file is implicitly marked as containing pure text data that can be converted. + If \ :emphasis:`tag\_untagged=text`\ each untagged file is implicitly marked as containing pure text data that can be converted. - If this flag is used, use of tag_ccsid is encouraged. + If this flag is used, use of tag\_ccsid is encouraged. | **required**: False | **type**: str - | **choices**: TEXT, NOTEXT + | **choices**: text, notext tag_ccsid Identifies the coded character set identifier (ccsid) to be implicitly set for the untagged file. - For more on coded character set identifiers, review the IBM documentation topic **Coded Character Sets**. + For more on coded character set identifiers, review the IBM documentation topic \ :strong:`Coded Character Sets`\ . Specified as a decimal value from 0 to 65535. However, when TEXT is specified, the value must be between 0 and 65535. The value is not checked as being valid and the corresponding code page is not checked as being installed. - Required when *tag_untagged=TEXT*. 
+ Required when \ :emphasis:`tag\_untagged=TEXT`\ . | **required**: False | **type**: int @@ -214,10 +214,10 @@ allow_uid Specifies whether the SETUID and SETGID mode bits on an executable in this file system are considered. Also determines whether the APF extended attribute or the Program Control extended attribute is honored. - If *allow_uid=True* the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. + If \ :emphasis:`allow\_uid=True`\ the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. - If *allow_uid=False* the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. + If \ :emphasis:`allow\_uid=False`\ the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. | **required**: False @@ -226,10 +226,10 @@ allow_uid sysname - For systems participating in shared file system, *sysname* specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). + For systems participating in shared file system, \ :emphasis:`sysname`\ specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). - *sysname* is the name of a system participating in shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. 
+ \ :emphasis:`sysname`\ is the name of a system participating in shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. | **required**: False @@ -240,23 +240,23 @@ automove These parameters apply only in a sysplex where systems are exploiting the shared file system capability. They specify what happens to the ownership of a file system when a shutdown, PFS termination, dead system takeover, or file system move occurs. The default setting is AUTOMOVE where the file system will be randomly moved to another system (no system list used). - *automove=AUTOMOVE* indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. + \ :emphasis:`automove=automove`\ indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. - *automove=NOAUTOMOVE* prevents movement of the file system's ownership in some situations. + \ :emphasis:`automove=noautomove`\ prevents movement of the file system's ownership in some situations. - *automove=UNMOUNT* allows the file system to be unmounted in some situations. + \ :emphasis:`automove=unmount`\ allows the file system to be unmounted in some situations. | **required**: False | **type**: str - | **default**: AUTOMOVE - | **choices**: AUTOMOVE, NOAUTOMOVE, UNMOUNT + | **default**: automove + | **choices**: automove, noautomove, unmount automove_list - If(automove=AUTOMOVE), this option will be checked. + If(automove=automove), this option will be checked. This specifies the list of servers to include or exclude as destinations. @@ -275,7 +275,7 @@ automove_list tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. 
+ The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -293,14 +293,14 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted - name: Unmount a filesystem. zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: unmounted unmount_opts: REMOUNT opts: same @@ -309,7 +309,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted mount_opts: RO @@ -317,7 +317,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted persistent: data_store: SYS1.PARMLIB(BPXPRMAA) @@ -327,7 +327,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted persistent: data_store: SYS1.PARMLIB(BPXPRMAA) @@ -339,7 +339,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted allow_uid: no @@ -347,7 +347,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted opts: nowait @@ -355,7 +355,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted mount_opts: NOSECURITY @@ -363,7 +363,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted automove: AUTOMOVE automove_list: I,DEV1,DEV2,DEV3,DEV9 @@ -372,7 +372,7 @@ Examples zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted automove: AUTOMOVE automove_list: EXCLUDE,DEV4,DEV5,DEV6,DEV7 @@ -389,7 +389,7 @@ Notes If an uncataloged data set needs to be fetched, it should be cataloged first. - Uncataloged data sets can be cataloged using the `zos_data_set <./zos_data_set.html>`_ module. 
+ Uncataloged data sets can be cataloged using the \ `zos\_data\_set <./zos_data_set.html>`__\ module. @@ -467,7 +467,7 @@ persistent | **sample**: SYS1.FILESYS(PRMAABAK) comment - The text that was used in markers around the *Persistent/data_store* entry. + The text that was used in markers around the \ :emphasis:`Persistent/data\_store`\ entry. | **returned**: always | **type**: list @@ -529,7 +529,7 @@ allow_uid true sysname - *sysname* specifies the particular system on which a mount should be performed. + \ :emphasis:`sysname`\ specifies the particular system on which a mount should be performed. | **returned**: if Non-None | **type**: str diff --git a/docs/source/modules/zos_mvs_raw.rst b/docs/source/modules/zos_mvs_raw.rst index 3ebedadd5..c0551786e 100644 --- a/docs/source/modules/zos_mvs_raw.rst +++ b/docs/source/modules/zos_mvs_raw.rst @@ -45,9 +45,9 @@ parm auth Determines whether this program should run with authorized privileges. - If *auth=true*, the program runs as APF authorized. + If \ :emphasis:`auth=true`\ , the program runs as APF authorized. - If *auth=false*, the program runs as unauthorized. + If \ :emphasis:`auth=false`\ , the program runs as unauthorized. | **required**: False | **type**: bool @@ -57,7 +57,7 @@ auth verbose Determines if verbose output should be returned from the underlying utility used by this module. - When *verbose=true* verbose output is returned on module failure. + When \ :emphasis:`verbose=true`\ verbose output is returned on module failure. | **required**: False | **type**: bool @@ -67,19 +67,19 @@ verbose dds The input data source. - *dds* supports 6 types of sources + \ :emphasis:`dds`\ supports 6 types of sources - 1. *dd_data_set* for data set files. + 1. \ :emphasis:`dd\_data\_set`\ for data set files. - 2. *dd_unix* for UNIX files. + 2. \ :emphasis:`dd\_unix`\ for UNIX files. - 3. *dd_input* for in-stream data set. + 3. \ :emphasis:`dd\_input`\ for in-stream data set. - 4. *dd_dummy* for no content input. + 4. 
\ :emphasis:`dd\_dummy`\ for no content input. - 5. *dd_concat* for a data set concatenation. + 5. \ :emphasis:`dd\_concat`\ for a data set concatenation. - 6. *dds* supports any combination of source types. + 6. \ :emphasis:`dds`\ supports any combination of source types. | **required**: False | **type**: list @@ -89,7 +89,7 @@ dds dd_data_set Specify a data set. - *dd_data_set* can reference an existing data set or be used to define a new data set to be created during execution. + \ :emphasis:`dd\_data\_set`\ can reference an existing data set or be used to define a new data set to be created during execution. | **required**: False | **type**: dict @@ -110,7 +110,7 @@ dds type - The data set type. Only required when *disposition=new*. + The data set type. Only required when \ :emphasis:`disposition=new`\ . Maps to DSNTYPE on z/OS. @@ -120,7 +120,7 @@ dds disposition - *disposition* indicates the status of a data set. + \ :emphasis:`disposition`\ indicates the status of a data set. Defaults to shr. @@ -130,31 +130,31 @@ dds disposition_normal - *disposition_normal* indicates what to do with the data set after a normal termination of the program. + \ :emphasis:`disposition\_normal`\ indicates what to do with the data set after a normal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog disposition_abnormal - *disposition_abnormal* indicates what to do with the data set after an abnormal termination of the program. + \ :emphasis:`disposition\_abnormal`\ indicates what to do with the data set after an abnormal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog reuse - Determines if a data set should be reused if *disposition=NEW* and if a data set with a matching name already exists. 
+ Determines if a data set should be reused if \ :emphasis:`disposition=new`\ and if a data set with a matching name already exists. - If *reuse=true*, *disposition* will be automatically switched to ``SHR``. + If \ :emphasis:`reuse=true`\ , \ :emphasis:`disposition`\ will be automatically switched to \ :literal:`SHR`\ . - If *reuse=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`reuse=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *replace*. + Mutually exclusive with \ :emphasis:`replace`\ . - *reuse* is only considered when *disposition=NEW* + \ :emphasis:`reuse`\ is only considered when \ :emphasis:`disposition=new`\ | **required**: False | **type**: bool @@ -162,17 +162,17 @@ dds replace - Determines if a data set should be replaced if *disposition=NEW* and a data set with a matching name already exists. + Determines if a data set should be replaced if \ :emphasis:`disposition=new`\ and a data set with a matching name already exists. - If *replace=true*, the original data set will be deleted, and a new data set created. + If \ :emphasis:`replace=true`\ , the original data set will be deleted, and a new data set created. - If *replace=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`replace=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *reuse*. + Mutually exclusive with \ :emphasis:`reuse`\ . - *replace* is only considered when *disposition=NEW* + \ :emphasis:`replace`\ is only considered when \ :emphasis:`disposition=new`\ - *replace* will result in loss of all data in the original data set unless *backup* is specified. + \ :emphasis:`replace`\ will result in loss of all data in the original data set unless \ :emphasis:`backup`\ is specified. 
| **required**: False | **type**: bool @@ -180,9 +180,9 @@ dds backup - Determines if a backup should be made of an existing data set when *disposition=NEW*, *replace=true*, and a data set with the desired name is found. + Determines if a backup should be made of an existing data set when \ :emphasis:`disposition=new`\ , \ :emphasis:`replace=true`\ , and a data set with the desired name is found. - *backup* is only used when *replace=true*. + \ :emphasis:`backup`\ is only used when \ :emphasis:`replace=true`\ . | **required**: False | **type**: bool @@ -190,7 +190,7 @@ dds space_type - The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. + The unit of measurement to use when allocating space for a new data set using \ :emphasis:`space\_primary`\ and \ :emphasis:`space\_secondary`\ . | **required**: False | **type**: str @@ -200,9 +200,9 @@ dds space_primary The primary amount of space to allocate for a new data set. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . | **required**: False | **type**: int @@ -211,9 +211,9 @@ dds space_secondary When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . 
| **required**: False | **type**: int @@ -231,7 +231,7 @@ dds sms_management_class The desired management class for a new SMS-managed data set. - *sms_management_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_management\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -242,7 +242,7 @@ dds sms_storage_class The desired storage class for a new SMS-managed data set. - *sms_storage_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_storage\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -253,7 +253,7 @@ dds sms_data_class The desired data class for a new SMS-managed data set. - *sms_data_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_data\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -264,7 +264,7 @@ dds block_size The maximum length of a block in bytes. - Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -280,9 +280,9 @@ dds key_label The label for the encryption key used by the system to encrypt the data set. - *key_label* is the public name of a protected encryption key in the ICSF key repository. + \ :emphasis:`key\_label`\ is the public name of a protected encryption key in the ICSF key repository. - *key_label* should only be provided when creating an extended format data set. + \ :emphasis:`key\_label`\ should only be provided when creating an extended format data set. Maps to DSKEYLBL on z/OS. @@ -304,7 +304,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB1 on z/OS. 
@@ -313,9 +313,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD1 on z/OS. @@ -339,7 +339,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB2 on z/OS. @@ -348,9 +348,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD2 on z/OS. @@ -363,7 +363,7 @@ dds key_length The length of the keys used in a new data set. - If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. + If using SMS, setting \ :emphasis:`key\_length`\ overrides the key length defined in the SMS data class of the data set. Valid values are (0-255 non-vsam), (1-255 vsam). @@ -376,20 +376,20 @@ dds The first byte of a logical record is position 0. - Provide *key_offset* only for VSAM key-sequenced data sets. + Provide \ :emphasis:`key\_offset`\ only for VSAM key-sequenced data sets. | **required**: False | **type**: int record_length - The logical record length. (e.g ``80``). + The logical record length. (e.g \ :literal:`80`\ ). For variable data sets, the length must include the 4-byte prefix area. 
Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - Valid values are (1-32760 for non-vsam, 1-32761 for vsam). + Valid values are (1-32760 for non-VSAM, 1-32761 for VSAM). Maps to LRECL on z/OS. @@ -417,11 +417,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -463,7 +463,7 @@ dds path The path to an existing UNIX file. - Or provide the path to an new created UNIX file when *status_group=OCREAT*. + Or provide the path to a newly created UNIX file when \ :emphasis:`status\_group=OCREAT`\ . The provided path must be absolute. @@ -488,7 +488,7 @@ dds mode - The file access attributes when the UNIX file is created specified in *path*. + The file access attributes when the UNIX file is created specified in \ :emphasis:`path`\ . Specify the mode as an octal number similarly to chmod. @@ -499,47 +499,47 @@ dds status_group - The status for the UNIX file specified in *path*. + The status for the UNIX file specified in \ :emphasis:`path`\ . - If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. + If you do not specify a value for the \ :emphasis:`status\_group`\ parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. Maps to PATHOPTS status group file options on z/OS. You can specify up to 6 choices. 
- *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. + \ :emphasis:`oappend`\ sets the file offset to the end of the file before each write, so that data is written at the end of the file. - *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. + \ :emphasis:`ocreat`\ specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and \ :emphasis:`oexcl`\ was not specified, the system allows the program to use the existing file. If the file already exists and \ :emphasis:`oexcl`\ was specified, the system fails the allocation and the job step. - *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. + \ :emphasis:`oexcl`\ specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores \ :emphasis:`oexcl`\ if \ :emphasis:`ocreat`\ is not also specified. - *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. + \ :emphasis:`onoctty`\ specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. 
- *ononblock* specifies the following, depending on the type of file + \ :emphasis:`ononblock`\ specifies the following, depending on the type of file For a FIFO special file - 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. + 1. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`ordonly`\ access, an open function for reading-only returns without delay. - 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. + 2. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`ordonly`\ access, an open function for reading-only blocks (waits) until a process opens the file for writing. - 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. + 3. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`owronly`\ access, an open function for writing-only returns an error if no process currently has the file open for reading. - 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. + 4. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`owronly`\ access, an open function for writing-only blocks (waits) until a process opens the file for reading. 5. For a character special file that supports nonblocking open - 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + 6. If \ :emphasis:`ononblock`\ is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. + 7. 
If \ :emphasis:`ononblock`\ is not specified, an open function blocks (waits) until the device is ready or available. - *ononblock* has no effect on other file types. + \ :emphasis:`ononblock`\ has no effect on other file types. - *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + \ :emphasis:`osync`\ specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. + \ :emphasis:`otrunc`\ specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with \ :emphasis:`ordwr`\ or \ :emphasis:`owronly`\ . - When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. + When \ :emphasis:`otrunc`\ is specified, the system does not change the mode and owner. \ :emphasis:`otrunc`\ has no effect on FIFO special files or character special files. | **required**: False | **type**: list @@ -548,7 +548,7 @@ dds access_group - The kind of access to request for the UNIX file specified in *path*. + The kind of access to request for the UNIX file specified in \ :emphasis:`path`\ . | **required**: False | **type**: str @@ -556,7 +556,7 @@ dds file_data_type - The type of data that is (or will be) stored in the file specified in *path*. + The type of data that is (or will be) stored in the file specified in \ :emphasis:`path`\ . Maps to FILEDATA on z/OS. @@ -569,7 +569,7 @@ dds block_size The block size, in bytes, for the UNIX file. 
- Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -578,7 +578,7 @@ dds record_length The logical record length for the UNIX file. - *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_length`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. Maps to LRECL on z/OS. @@ -589,7 +589,7 @@ dds record_format The record format for the UNIX file. - *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_format`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. | **required**: False | **type**: str @@ -608,11 +608,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. 
| **required**: True | **type**: str @@ -638,7 +638,7 @@ dds dd_input - *dd_input* is used to specify an in-stream data set. + \ :emphasis:`dd\_input`\ is used to specify an in-stream data set. Input will be saved to a temporary data set with a record length of 80. @@ -656,15 +656,15 @@ dds content The input contents for the DD. - *dd_input* supports single or multiple lines of input. + \ :emphasis:`dd\_input`\ supports single or multiple lines of input. Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. If a list of strings is provided, newlines will be added to each of the lines when used as input. - If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. + If a multi-line string is provided, use the proper block scalar style. YAML supports both \ `literal `__\ and \ `folded `__\ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; \ :emphasis:`content: | 2`\ is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block \ `chomping `__\ indicators "+" and "-" as well. - When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. 
This is true for all *content* types; string, list of strings and when using a YAML block indicator. + When using the \ :emphasis:`content`\ option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all \ :emphasis:`content`\ types; string, list of strings and when using a YAML block indicator. | **required**: True | **type**: raw @@ -682,11 +682,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -696,7 +696,7 @@ dds src_encoding The encoding of the data set on the z/OS system. - for *dd_input*, *src_encoding* should generally not need to be changed. + for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed. | **required**: False | **type**: str @@ -714,7 +714,7 @@ dds dd_output - Use *dd_output* to specify - Content sent to the DD should be returned to the user. + Use \ :emphasis:`dd\_output`\ to specify - Content sent to the DD should be returned to the user. | **required**: False | **type**: dict @@ -739,11 +739,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. 
+ \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -753,7 +753,7 @@ dds src_encoding The encoding of the data set on the z/OS system. - for *dd_input*, *src_encoding* should generally not need to be changed. + for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed. | **required**: False | **type**: str @@ -771,9 +771,9 @@ dds dd_dummy - Use *dd_dummy* to specify - No device or external storage space is to be allocated to the data set. - No disposition processing is to be performed on the data set. + Use \ :emphasis:`dd\_dummy`\ to specify - No device or external storage space is to be allocated to the data set. - No disposition processing is to be performed on the data set. - *dd_dummy* accepts no content input. + \ :emphasis:`dd\_dummy`\ accepts no content input. | **required**: False | **type**: dict @@ -788,7 +788,7 @@ dds dd_vio - *dd_vio* is used to handle temporary data sets. + \ :emphasis:`dd\_vio`\ is used to handle temporary data sets. VIO data sets reside in the paging space; but, to the problem program and the access method, the data sets appear to reside on a direct access storage device. @@ -807,7 +807,7 @@ dds dd_concat - *dd_concat* is used to specify a data set concatenation. + \ :emphasis:`dd\_concat`\ is used to specify a data set concatenation. | **required**: False | **type**: dict @@ -821,7 +821,7 @@ dds dds - A list of DD statements, which can contain any of the following types: *dd_data_set*, *dd_unix*, and *dd_input*. + A list of DD statements, which can contain any of the following types: \ :emphasis:`dd\_data\_set`\ , \ :emphasis:`dd\_unix`\ , and \ :emphasis:`dd\_input`\ . | **required**: False | **type**: list @@ -831,7 +831,7 @@ dds dd_data_set Specify a data set. 
- *dd_data_set* can reference an existing data set. The data set referenced with ``data_set_name`` must be allocated before the module `zos_mvs_raw <./zos_mvs_raw.html>`_ is run, you can use `zos_data_set <./zos_data_set.html>`_ to allocate a data set. + \ :emphasis:`dd\_data\_set`\ can reference an existing data set. The data set referenced with \ :literal:`data\_set\_name`\ must be allocated before the module \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ is run, you can use \ `zos\_data\_set <./zos_data_set.html>`__\ to allocate a data set. | **required**: False | **type**: dict @@ -845,7 +845,7 @@ dds type - The data set type. Only required when *disposition=new*. + The data set type. Only required when \ :emphasis:`disposition=new`\ . Maps to DSNTYPE on z/OS. @@ -855,7 +855,7 @@ dds disposition - *disposition* indicates the status of a data set. + \ :emphasis:`disposition`\ indicates the status of a data set. Defaults to shr. @@ -865,31 +865,31 @@ dds disposition_normal - *disposition_normal* indicates what to do with the data set after normal termination of the program. + \ :emphasis:`disposition\_normal`\ indicates what to do with the data set after normal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog disposition_abnormal - *disposition_abnormal* indicates what to do with the data set after abnormal termination of the program. + \ :emphasis:`disposition\_abnormal`\ indicates what to do with the data set after abnormal termination of the program. | **required**: False | **type**: str - | **choices**: delete, keep, catlg, catalog, uncatlg, uncatalog + | **choices**: delete, keep, catalog, uncatalog reuse - Determines if data set should be reused if *disposition=NEW* and a data set with matching name already exists. 
+ Determines if data set should be reused if \ :emphasis:`disposition=new`\ and a data set with matching name already exists. - If *reuse=true*, *disposition* will be automatically switched to ``SHR``. + If \ :emphasis:`reuse=true`\ , \ :emphasis:`disposition`\ will be automatically switched to \ :literal:`SHR`\ . - If *reuse=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`reuse=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *replace*. + Mutually exclusive with \ :emphasis:`replace`\ . - *reuse* is only considered when *disposition=NEW* + \ :emphasis:`reuse`\ is only considered when \ :emphasis:`disposition=new`\ | **required**: False | **type**: bool @@ -897,17 +897,17 @@ dds replace - Determines if data set should be replaced if *disposition=NEW* and a data set with matching name already exists. + Determines if data set should be replaced if \ :emphasis:`disposition=new`\ and a data set with matching name already exists. - If *replace=true*, the original data set will be deleted, and a new data set created. + If \ :emphasis:`replace=true`\ , the original data set will be deleted, and a new data set created. - If *replace=false*, and a data set with a matching name already exists, allocation will fail. + If \ :emphasis:`replace=false`\ , and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with *reuse*. + Mutually exclusive with \ :emphasis:`reuse`\ . - *replace* is only considered when *disposition=NEW* + \ :emphasis:`replace`\ is only considered when \ :emphasis:`disposition=new`\ - *replace* will result in loss of all data in the original data set unless *backup* is specified. + \ :emphasis:`replace`\ will result in loss of all data in the original data set unless \ :emphasis:`backup`\ is specified. 
| **required**: False | **type**: bool @@ -915,9 +915,9 @@ dds backup - Determines if a backup should be made of existing data set when *disposition=NEW*, *replace=true*, and a data set with the desired name is found. + Determines if a backup should be made of existing data set when \ :emphasis:`disposition=new`\ , \ :emphasis:`replace=true`\ , and a data set with the desired name is found. - *backup* is only used when *replace=true*. + \ :emphasis:`backup`\ is only used when \ :emphasis:`replace=true`\ . | **required**: False | **type**: bool @@ -925,7 +925,7 @@ dds space_type - The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. + The unit of measurement to use when allocating space for a new data set using \ :emphasis:`space\_primary`\ and \ :emphasis:`space\_secondary`\ . | **required**: False | **type**: str @@ -935,9 +935,9 @@ dds space_primary The primary amount of space to allocate for a new data set. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . | **required**: False | **type**: int @@ -946,9 +946,9 @@ dds space_secondary When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. - The value provided to *space_type* is used as the unit of space for the allocation. + The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. - Not applicable when *space_type=blklgth* or *space_type=reclgth*. + Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . 
| **required**: False | **type**: int @@ -966,7 +966,7 @@ dds sms_management_class The desired management class for a new SMS-managed data set. - *sms_management_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_management\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -977,7 +977,7 @@ dds sms_storage_class The desired storage class for a new SMS-managed data set. - *sms_storage_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_storage\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -988,7 +988,7 @@ dds sms_data_class The desired data class for a new SMS-managed data set. - *sms_data_class* is ignored if specified for an existing data set. + \ :emphasis:`sms\_data\_class`\ is ignored if specified for an existing data set. All values must be between 1-8 alpha-numeric characters. @@ -999,7 +999,7 @@ dds block_size The maximum length of a block in bytes. - Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -1015,9 +1015,9 @@ dds key_label The label for the encryption key used by the system to encrypt the data set. - *key_label* is the public name of a protected encryption key in the ICSF key repository. + \ :emphasis:`key\_label`\ is the public name of a protected encryption key in the ICSF key repository. - *key_label* should only be provided when creating an extended format data set. + \ :emphasis:`key\_label`\ should only be provided when creating an extended format data set. Maps to DSKEYLBL on z/OS. @@ -1039,7 +1039,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB1 on z/OS. 
@@ -1048,9 +1048,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD1 on z/OS. @@ -1074,7 +1074,7 @@ dds Key label must have a private key associated with it. - *label* can be a maximum of 64 characters. + \ :emphasis:`label`\ can be a maximum of 64 characters. Maps to KEYLAB2 on z/OS. @@ -1083,9 +1083,9 @@ dds encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. - *encoding* can either be set to ``L`` for label encoding, or ``H`` for hash encoding. + \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. Maps to KEYCD2 on z/OS. @@ -1098,7 +1098,7 @@ dds key_length The length of the keys used in a new data set. - If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. + If using SMS, setting \ :emphasis:`key\_length`\ overrides the key length defined in the SMS data class of the data set. Valid values are (0-255 non-vsam), (1-255 vsam). @@ -1111,14 +1111,14 @@ dds The first byte of a logical record is position 0. - Provide *key_offset* only for VSAM key-sequenced data sets. + Provide \ :emphasis:`key\_offset`\ only for VSAM key-sequenced data sets. | **required**: False | **type**: int record_length - The logical record length. (e.g ``80``). + The logical record length. (e.g \ :literal:`80`\ ). For variable data sets, the length must include the 4-byte prefix area. 
@@ -1152,11 +1152,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -1191,7 +1191,7 @@ dds path The path to an existing UNIX file. - Or provide the path to an new created UNIX file when *status_group=OCREAT*. + Or provide the path to a newly created UNIX file when \ :emphasis:`status\_group=ocreat`\ . The provided path must be absolute. @@ -1216,7 +1216,7 @@ dds mode - The file access attributes when the UNIX file is created specified in *path*. + The file access attributes when the UNIX file is created specified in \ :emphasis:`path`\ . Specify the mode as an octal number similar to chmod. @@ -1227,47 +1227,47 @@ dds status_group - The status for the UNIX file specified in *path*. + The status for the UNIX file specified in \ :emphasis:`path`\ . - If you do not specify a value for the *status_group* parameter the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. + If you do not specify a value for the \ :emphasis:`status\_group`\ parameter the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. Maps to PATHOPTS status group file options on z/OS. You can specify up to 6 choices. - *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. + \ :emphasis:`oappend`\ sets the file offset to the end of the file before each write, so that data is written at the end of the file. 
- *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. + \ :emphasis:`ocreat`\ specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and \ :emphasis:`oexcl`\ was not specified, the system allows the program to use the existing file. If the file already exists and \ :emphasis:`oexcl`\ was specified, the system fails the allocation and the job step. - *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. + \ :emphasis:`oexcl`\ specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores \ :emphasis:`oexcl`\ if \ :emphasis:`ocreat`\ is not also specified. - *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. + \ :emphasis:`onoctty`\ specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. - *ononblock* specifies the following, depending on the type of file + \ :emphasis:`ononblock`\ specifies the following, depending on the type of file For a FIFO special file - 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. + 1. 
With \ :emphasis:`ononblock`\ specified and \ :emphasis:`ordonly`\ access, an open function for reading-only returns without delay. - 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. + 2. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`ordonly`\ access, an open function for reading-only blocks (waits) until a process opens the file for writing. - 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. + 3. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`owronly`\ access, an open function for writing-only returns an error if no process currently has the file open for reading. - 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. + 4. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`owronly`\ access, an open function for writing-only blocks (waits) until a process opens the file for reading. 5. For a character special file that supports nonblocking open - 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + 6. If \ :emphasis:`ononblock`\ is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. + 7. If \ :emphasis:`ononblock`\ is not specified, an open function blocks (waits) until the device is ready or available. - *ononblock* has no effect on other file types. + \ :emphasis:`ononblock`\ has no effect on other file types. 
- *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + \ :emphasis:`osync`\ specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. + \ :emphasis:`otrunc`\ specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with \ :emphasis:`ordwr`\ or \ :emphasis:`owronly`\ . - When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. + When \ :emphasis:`otrunc`\ is specified, the system does not change the mode and owner. \ :emphasis:`otrunc`\ has no effect on FIFO special files or character special files. | **required**: False | **type**: list @@ -1276,7 +1276,7 @@ dds access_group - The kind of access to request for the UNIX file specified in *path*. + The kind of access to request for the UNIX file specified in \ :emphasis:`path`\ . | **required**: False | **type**: str @@ -1284,7 +1284,7 @@ dds file_data_type - The type of data that is (or will be) stored in the file specified in *path*. + The type of data that is (or will be) stored in the file specified in \ :emphasis:`path`\ . Maps to FILEDATA on z/OS. @@ -1297,7 +1297,7 @@ dds block_size The block size, in bytes, for the UNIX file. - Default is dependent on *record_format* + Default is dependent on \ :emphasis:`record\_format`\ | **required**: False | **type**: int @@ -1306,7 +1306,7 @@ dds record_length The logical record length for the UNIX file. 
- *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_length`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. Maps to LRECL on z/OS. @@ -1317,7 +1317,7 @@ dds record_format The record format for the UNIX file. - *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + \ :emphasis:`record\_format`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. | **required**: False | **type**: str @@ -1336,11 +1336,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -1366,7 +1366,7 @@ dds dd_input - *dd_input* is used to specify an in-stream data set. + \ :emphasis:`dd\_input`\ is used to specify an in-stream data set. 
Input will be saved to a temporary data set with a record length of 80. @@ -1377,15 +1377,15 @@ dds content The input contents for the DD. - *dd_input* supports single or multiple lines of input. + \ :emphasis:`dd\_input`\ supports single or multiple lines of input. Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. If a list of strings is provided, newlines will be added to each of the lines when used as input. - If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. + If a multi-line string is provided, use the proper block scalar style. YAML supports both \ `literal `__\ and \ `folded `__\ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; \ :emphasis:`content: | 2`\ is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block \ `chomping `__\ indicators "+" and "-" as well. - When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all *content* types; string, list of strings and when using a YAML block indicator. 
+ When using the \ :emphasis:`content`\ option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all \ :emphasis:`content`\ types; string, list of strings and when using a YAML block indicator. | **required**: True | **type**: raw @@ -1403,11 +1403,11 @@ dds type The type of the content to be returned. - ``text`` means return content in encoding specified by *response_encoding*. + \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . - *src_encoding* and *response_encoding* are only used when *type=text*. + \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . - ``base64`` means return content in binary mode. + \ :literal:`base64`\ means return content in binary mode. | **required**: True | **type**: str @@ -1417,7 +1417,7 @@ dds src_encoding The encoding of the data set on the z/OS system. - for *dd_input*, *src_encoding* should generally not need to be changed. + for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed. | **required**: False | **type**: str @@ -1440,7 +1440,7 @@ dds tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -1756,11 +1756,11 @@ Notes ----- .. note:: - When executing programs using `zos_mvs_raw <./zos_mvs_raw.html>`_, you may encounter errors that originate in the programs implementation. Two such known issues are noted below of which one has been addressed with an APAR. 
+ When executing programs using \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ , you may encounter errors that originate in the programs implementation. Two such known issues are noted below of which one has been addressed with an APAR. - 1. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication. + 1. \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication. - 2. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089. + 2. \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089. 3. When executing a program, refer to the programs documentation as each programs requirments can vary fom DDs, instream-data indentation and continuation characters. @@ -1838,7 +1838,7 @@ backups | **type**: str backup_name - The name of the data set containing the backup of content from data set in original_name. + The name of the data set containing the backup of content from data set in original\_name. | **type**: str diff --git a/docs/source/modules/zos_operator.rst b/docs/source/modules/zos_operator.rst index 9ad26d64c..ff1e5fe87 100644 --- a/docs/source/modules/zos_operator.rst +++ b/docs/source/modules/zos_operator.rst @@ -52,7 +52,7 @@ wait_time_s This option is helpful on a busy system requiring more time to execute commands. - Setting *wait* can instruct if execution should wait the full *wait_time_s*. + Setting \ :emphasis:`wait`\ can instruct if execution should wait the full \ :emphasis:`wait\_time\_s`\ . 
| **required**: False | **type**: int diff --git a/docs/source/modules/zos_operator_action_query.rst b/docs/source/modules/zos_operator_action_query.rst index b2e99d399..a03a17fdc 100644 --- a/docs/source/modules/zos_operator_action_query.rst +++ b/docs/source/modules/zos_operator_action_query.rst @@ -31,7 +31,7 @@ system If the system name is not specified, all outstanding messages for that system and for the local systems attached to it are returned. - A trailing asterisk, (*) wildcard is supported. + A trailing asterisk, (\*) wildcard is supported. | **required**: False | **type**: str @@ -42,7 +42,7 @@ message_id If the message identifier is not specified, all outstanding messages for all message identifiers are returned. - A trailing asterisk, (*) wildcard is supported. + A trailing asterisk, (\*) wildcard is supported. | **required**: False | **type**: str @@ -53,7 +53,7 @@ job_name If the message job name is not specified, all outstanding messages for all job names are returned. - A trailing asterisk, (*) wildcard is supported. + A trailing asterisk, (\*) wildcard is supported. | **required**: False | **type**: str @@ -69,24 +69,24 @@ message_filter filter - Specifies the substring or regex to match to the outstanding messages, see *use_regex*. + Specifies the substring or regex to match to the outstanding messages, see \ :emphasis:`use\_regex`\ . All special characters in a filter string that are not a regex are escaped. - Valid Python regular expressions are supported. See `the official documentation `_ for more information. + Valid Python regular expressions are supported. See \ `the official documentation `__\ for more information. - Regular expressions are compiled with the flag **re.DOTALL** which makes the **'.'** special character match any character including a newline." + Regular expressions are compiled with the flag \ :strong:`re.DOTALL`\ which makes the \ :strong:`'.'`\ special character match any character including a newline." 
| **required**: True | **type**: str use_regex - Indicates that the value for *filter* is a regex or a string to match. + Indicates that the value for \ :emphasis:`filter`\ is a regex or a string to match. - If False, the module assumes that *filter* is not a regex and matches the *filter* substring on the outstanding messages. + If False, the module assumes that \ :emphasis:`filter`\ is not a regex and matches the \ :emphasis:`filter`\ substring on the outstanding messages. - If True, the module creates a regex from the *filter* string and matches it to the outstanding messages. + If True, the module creates a regex from the \ :emphasis:`filter`\ string and matches it to the outstanding messages. | **required**: False | **type**: bool @@ -222,7 +222,7 @@ actions | **sample**: STC01537 message_text - Content of the outstanding message requiring operator action awaiting a reply. If *message_filter* is set, *message_text* will be filtered accordingly. + Content of the outstanding message requiring operator action awaiting a reply. If \ :emphasis:`message\_filter`\ is set, \ :emphasis:`message\_text`\ will be filtered accordingly. | **returned**: success | **type**: str diff --git a/docs/source/modules/zos_ping.rst b/docs/source/modules/zos_ping.rst index a4405b473..acb901790 100644 --- a/docs/source/modules/zos_ping.rst +++ b/docs/source/modules/zos_ping.rst @@ -16,9 +16,9 @@ zos_ping -- Ping z/OS and check dependencies. Synopsis -------- -- `zos_ping <./zos_ping.html>`_ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python. -- `zos_ping <./zos_ping.html>`_ returns ``pong`` when the target host is not missing any required dependencies. -- If the target host is missing optional dependencies, the `zos_ping <./zos_ping.html>`_ will return one or more warning messages. +- \ `zos\_ping <./zos_ping.html>`__\ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python. 
+- \ `zos\_ping <./zos_ping.html>`__\ returns \ :literal:`pong`\ when the target host is not missing any required dependencies. +- If the target host is missing optional dependencies, the \ `zos\_ping <./zos_ping.html>`__\ will return one or more warning messages. - If a required dependency is missing from the target host, an explanatory message will be returned with the module failure. @@ -44,7 +44,7 @@ Notes ----- .. note:: - This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed nodes default encoding, eg IBM-1047. Starting with OpenSSH 9.0, it switches from SCP to use SFTP by default, meaning transfers are no longer treated as text and are transferred as binary preserving the source files encoding resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry ``scp_extra_args="-O"`` into the ini file named ``ansible.cfg``. + This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed nodes default encoding, eg IBM-1047. Starting with OpenSSH 9.0, it switches from SCP to use SFTP by default, meaning transfers are no longer treated as text and are transferred as binary preserving the source files encoding resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry \ :literal:`scp\_extra\_args="-O"`\ into the ini file named \ :literal:`ansible.cfg`\ . diff --git a/docs/source/modules/zos_script.rst b/docs/source/modules/zos_script.rst index 31b237588..6f36e05e2 100644 --- a/docs/source/modules/zos_script.rst +++ b/docs/source/modules/zos_script.rst @@ -16,7 +16,7 @@ zos_script -- Run scripts in z/OS Synopsis -------- -- The `zos_script <./zos_script.html>`_ module runs a local or remote script in the remote machine. 
+- The \ `zos\_script <./zos_script.html>`__\ module runs a local or remote script in the remote machine. @@ -56,7 +56,7 @@ creates encoding Specifies which encodings the script should be converted from and to. - If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. + If \ :literal:`encoding`\ is not provided, the module determines which local and remote charsets to convert the data from and to. | **required**: False | **type**: dict @@ -87,9 +87,9 @@ executable remote_src - If set to ``false``, the module will search the script in the controller. + If set to \ :literal:`false`\ , the module will search the script in the controller. - If set to ``true``, the module will search the script in the remote machine. + If set to \ :literal:`true`\ , the module will search the script in the remote machine. | **required**: False | **type**: bool @@ -103,13 +103,13 @@ removes use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when ``src`` is a local file or directory. + Only valid when \ :literal:`src`\ is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. 
You can see the complete precedence order `in Ansible's documentation `_ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ | **required**: False | **type**: bool @@ -119,9 +119,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . - These options are ignored unless ``use_template`` is true. + These options are ignored unless \ :literal:`use\_template`\ is true. | **required**: False | **type**: dict @@ -200,7 +200,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block is removed. - Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. | **required**: False | **type**: bool @@ -284,7 +284,7 @@ Notes .. note:: When executing local scripts, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file being copied. - The location in the z/OS system where local scripts will be copied to can be configured through Ansible's ``remote_tmp`` option. Refer to `Ansible's documentation `_ for more information. + The location in the z/OS system where local scripts will be copied to can be configured through Ansible's \ :literal:`remote\_tmp`\ option. 
Refer to \ `Ansible's documentation `__\ for more information. All local scripts copied to a remote z/OS system will be removed from the managed node before the module finishes executing. @@ -292,13 +292,13 @@ Notes The module will only add execution permissions for the file owner. - If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error ``BPXW0003I``. + If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error \ :literal:`BPXW0003I`\ . - For supported character sets used to encode data, refer to the `documentation `_. + For supported character sets used to encode data, refer to the \ `documentation `__\ . - This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with `zos_tso_command <./zos_tso_command.html>`_. 
+ This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with \ `zos\_tso\_command <./zos_tso_command.html>`__\ . The community script module does not rely on Python to execute scripts on a managed node, while this module does. Python must be present on the remote machine. diff --git a/docs/source/modules/zos_tso_command.rst b/docs/source/modules/zos_tso_command.rst index 4af6b1b52..b35c13a1b 100644 --- a/docs/source/modules/zos_tso_command.rst +++ b/docs/source/modules/zos_tso_command.rst @@ -40,7 +40,7 @@ commands max_rc Specifies the maximum return code allowed for a TSO command. - If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. + If more than one TSO command is submitted, the \ :emphasis:`max\_rc`\ applies to all TSO commands. | **required**: False | **type**: int @@ -119,7 +119,7 @@ output max_rc Specifies the maximum return code allowed for a TSO command. - If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. + If more than one TSO command is submitted, the \ :emphasis:`max\_rc`\ applies to all TSO commands. | **returned**: always | **type**: int diff --git a/docs/source/modules/zos_unarchive.rst b/docs/source/modules/zos_unarchive.rst index 91fa597ee..a53747d6c 100644 --- a/docs/source/modules/zos_unarchive.rst +++ b/docs/source/modules/zos_unarchive.rst @@ -16,8 +16,8 @@ zos_unarchive -- Unarchive files and data sets in z/OS. Synopsis -------- -- The ``zos_unarchive`` module unpacks an archive after optionally transferring it to the remote system. -- For supported archive formats, see option ``format``. +- The \ :literal:`zos\_unarchive`\ module unpacks an archive after optionally transferring it to the remote system. +- For supported archive formats, see option \ :literal:`format`\ . - Supported sources are USS (UNIX System Services) or z/OS data sets. 
- Mixing MVS data sets with USS files for unarchiving is not supported. - The archive is sent to the remote as binary, so no encoding is performed. @@ -33,11 +33,11 @@ Parameters src The remote absolute path or data set of the archive to be uncompressed. - *src* can be a USS file or MVS data set name. + \ :emphasis:`src`\ can be a USS file or MVS data set name. USS file paths should be absolute paths. - MVS data sets supported types are ``SEQ``, ``PDS``, ``PDSE``. + MVS data sets supported types are \ :literal:`SEQ`\ , \ :literal:`PDS`\ , \ :literal:`PDSE`\ . | **required**: True | **type**: str @@ -72,14 +72,14 @@ format If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB - When providing the *xmit_log_data_set* name, ensure there is adequate space. + When providing the \ :emphasis:`xmit\_log\_data\_set`\ name, ensure there is adequate space. | **required**: False | **type**: str use_adrdssu - If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using ``xmit`` or ``terse``. + If set to true, the \ :literal:`zos\_archive`\ module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using \ :literal:`xmit`\ or \ :literal:`terse`\ . | **required**: False | **type**: bool @@ -87,7 +87,7 @@ format dest_volumes - When *use_adrdssu=True*, specify the volume the data sets will be written to. + When \ :emphasis:`use\_adrdssu=True`\ , specify the volume the data sets will be written to. If no volume is specified, storage management rules will be used to determine the volume where the file will be unarchived. @@ -103,7 +103,7 @@ format dest The remote absolute path or data set where the content should be unarchived to. 
- *dest* can be a USS file, directory or MVS data set name. + \ :emphasis:`dest`\ can be a USS file, directory or MVS data set name. If dest has missing parent directories, they will not be created. @@ -116,7 +116,7 @@ group When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership. - This option is only applicable if ``dest`` is USS, otherwise ignored. + This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored. | **required**: False | **type**: str @@ -125,13 +125,13 @@ group mode The permission of the uncompressed files. - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored. - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ ) or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. - The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`. + The mode may also be specified as a symbolic mode (for example, \`\`u+rwx\`\` or \`\`u=rw,g=r,o=r\`\`) or a special string \`preserve\`. 
- *mode=preserve* means that the file will be given the same permissions as the source file. + \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the source file. | **required**: False | **type**: str @@ -149,7 +149,7 @@ owner include A list of directories, files or data set names to extract from the archive. - When ``include`` is set, only those files will we be extracted leaving the remaining files in the archive. + When \ :literal:`include`\ is set, only those files will we be extracted leaving the remaining files in the archive. Mutually exclusive with exclude. @@ -177,7 +177,7 @@ list dest_data_set - Data set attributes to customize a ``dest`` data set that the archive will be copied into. + Data set attributes to customize a \ :literal:`dest`\ data set that the archive will be copied into. | **required**: False | **type**: dict @@ -195,23 +195,23 @@ dest_data_set | **required**: False | **type**: str - | **default**: SEQ - | **choices**: SEQ, PDS, PDSE + | **default**: seq + | **choices**: seq, pds, pdse space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . | **required**: False | **type**: int space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. + If the destination \ :emphasis:`dest`\ data set does not exist , this sets the secondary space allocated for the data set. - The unit of space used is set using *space_type*. + The unit of space used is set using \ :emphasis:`space\_type`\ . 
| **required**: False | **type**: int @@ -220,21 +220,21 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are ``K``, ``M``, ``G``, ``CYL``, and ``TRK``. + Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . | **required**: False | **type**: str - | **choices**: K, M, G, CYL, TRK + | **choices**: k, m, g, cyl, trk record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``FB``) + If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`fb`\ ) - Choices are case-insensitive. + Choices are case-sensitive. | **required**: False | **type**: str - | **choices**: FB, VB, FBA, VBA, U + | **choices**: fb, vb, fba, vba, u record_length @@ -265,9 +265,9 @@ dest_data_set key_offset The key offset to use when creating a KSDS data set. - *key_offset* is required when *type=KSDS*. + \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . - *key_offset* should only be provided when *type=KSDS* + \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -276,9 +276,9 @@ dest_data_set key_length The key length to use when creating a KSDS data set. - *key_length* is required when *type=KSDS*. + \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . - *key_length* should only be provided when *type=KSDS* + \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ | **required**: False | **type**: int @@ -327,7 +327,7 @@ dest_data_set tmp_hlq Override the default high level qualifier (HLQ) for temporary data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used. 
+ The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value \ :literal:`TMPHLQ`\ is used. | **required**: False | **type**: str @@ -342,9 +342,9 @@ force remote_src - If set to true, ``zos_unarchive`` retrieves the archive from the remote system. + If set to true, \ :literal:`zos\_unarchive`\ retrieves the archive from the remote system. - If set to false, ``zos_unarchive`` searches the local machine (Ansible controller) for the archive. + If set to false, \ :literal:`zos\_unarchive`\ searches the local machine (Ansible controller) for the archive. | **required**: False | **type**: bool @@ -404,7 +404,7 @@ Notes .. note:: VSAMs are not supported. - This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. diff --git a/docs/source/modules/zos_volume_init.rst b/docs/source/modules/zos_volume_init.rst index 195435924..25a0897b9 100644 --- a/docs/source/modules/zos_volume_init.rst +++ b/docs/source/modules/zos_volume_init.rst @@ -17,14 +17,14 @@ zos_volume_init -- Initialize volumes or minidisks. 
Synopsis -------- - Initialize a volume or minidisk on z/OS. -- *zos_volume_init* will create the volume label and entry into the volume table of contents (VTOC). +- \ :emphasis:`zos\_volume\_init`\ will create the volume label and entry into the volume table of contents (VTOC). - Volumes are used for storing data and executable programs. - A minidisk is a portion of a disk that is linked to your virtual machine. - A VTOC lists the data sets that reside on a volume, their location, size, and other attributes. -- *zos_volume_init* uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class `STGADMIN.ICK.INIT`. Protection occurs when the class is active, and the class profile is defined. Ensure the user executing the Ansible task is permitted to execute ICKDSF command INIT, otherwise, any user can use the command. -- ICKDSF is an Authorized Program Facility (APF) program on z/OS, *zos_volume_init* will run in authorized mode but if the program ICKDSF is not APF authorized, the task will end. +- \ :emphasis:`zos\_volume\_init`\ uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class \`STGADMIN.ICK.INIT\`. Protection occurs when the class is active, and the class profile is defined. Ensure the user executing the Ansible task is permitted to execute ICKDSF command INIT, otherwise, any user can use the command. +- ICKDSF is an Authorized Program Facility (APF) program on z/OS, \ :emphasis:`zos\_volume\_init`\ will run in authorized mode but if the program ICKDSF is not APF authorized, the task will end. - Note that defaults set on target z/OS systems may override ICKDSF parameters. -- If is recommended that data on the volume is backed up as the *zos_volume_init* module will not perform any backups. You can use the `zos_backup_restore <./zos_backup_restore.html>`_ module to backup a volume. 
+- If is recommended that data on the volume is backed up as the \ :emphasis:`zos\_volume\_init`\ module will not perform any backups. You can use the \ `zos\_backup\_restore <./zos_backup_restore.html>`__\ module to backup a volume. @@ -35,9 +35,9 @@ Parameters address - *address* is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk. + \ :emphasis:`address`\ is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk. - *address* can be the number assigned to the device (device number) when it is installed or the virtual address. + \ :emphasis:`address`\ can be the number assigned to the device (device number) when it is installed or the virtual address. | **required**: True | **type**: str @@ -46,15 +46,15 @@ address verify_volid Verify that the volume serial matches what is on the existing volume or minidisk. - *verify_volid* must be 1 to 6 alphanumeric characters or ``*NONE*``. + \ :emphasis:`verify\_volid`\ must be 1 to 6 alphanumeric characters or \ :literal:`\*NONE\*`\ . - To verify that a volume serial number does not exist, use *verify_volid=*NONE**. + To verify that a volume serial number does not exist, use \ :emphasis:`verify\_volid=\*NONE\*`\ . - If *verify_volid* is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete. + If \ :emphasis:`verify\_volid`\ is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete. - If *verify_volid=*NONE** is specified and a volume serial is found on the volume or minidisk, initialization does not complete. + If \ :emphasis:`verify\_volid=\*NONE\*`\ is specified and a volume serial is found on the volume or minidisk, initialization does not complete. - Note, this option is **not** a boolean, leave it blank to skip the verification. 
+ Note, this option is \ :strong:`not`\ a boolean, leave it blank to skip the verification. | **required**: False | **type**: str @@ -73,11 +73,11 @@ volid Expects 1-6 alphanumeric, national ($,#,@) or special characters. - A *volid* with less than 6 characters will be padded with spaces. + A \ :emphasis:`volid`\ with less than 6 characters will be padded with spaces. - A *volid* can also be referred to as volser or volume serial number. + A \ :emphasis:`volid`\ can also be referred to as volser or volume serial number. - When *volid* is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. + When \ :emphasis:`volid`\ is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. | **required**: False | **type**: str @@ -99,7 +99,7 @@ index The VTOC index enhances the performance of VTOC access. - When set to *false*, no index will be created. + When set to \ :emphasis:`false`\ , no index will be created. | **required**: False | **type**: bool @@ -109,7 +109,7 @@ index sms_managed Specifies that the volume be managed by Storage Management System (SMS). - If *sms_managed* is *true* then *index* must also be *true*. + If \ :emphasis:`sms\_managed`\ is \ :emphasis:`true`\ then \ :emphasis:`index`\ must also be \ :emphasis:`true`\ . | **required**: False | **type**: bool @@ -127,7 +127,7 @@ verify_volume_empty tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. 
| **required**: False | **type**: str diff --git a/plugins/action/zos_copy.py b/plugins/action/zos_copy.py index e9c238b87..e3ea36dc8 100644 --- a/plugins/action/zos_copy.py +++ b/plugins/action/zos_copy.py @@ -403,7 +403,7 @@ def _remote_cleanup(self, dest, dest_exists, task_vars): else: module_args = dict(name=dest, state="absent") if is_member(dest): - module_args["type"] = "MEMBER" + module_args["type"] = "member" self._execute_module( module_name="ibm.ibm_zos_core.zos_data_set", module_args=module_args, @@ -466,6 +466,16 @@ def _update_result(is_binary, copy_res, original_args, original_src): updated_result["dest_created"] = True updated_result["destination_attributes"] = dest_data_set_attrs + # Setting attributes to lower case to conform to docs. + # Part of the change to lowercase choices in the collection involves having + # a consistent interface that also returns the same values in lowercase. + if "record_format" in updated_result["destination_attributes"]: + updated_result["destination_attributes"]["record_format"] = updated_result["destination_attributes"]["record_format"].lower() + if "space_type" in updated_result["destination_attributes"]: + updated_result["destination_attributes"]["space_type"] = updated_result["destination_attributes"]["space_type"].lower() + if "type" in updated_result["destination_attributes"]: + updated_result["destination_attributes"]["type"] = updated_result["destination_attributes"]["type"].lower() + return updated_result diff --git a/plugins/action/zos_job_submit.py b/plugins/action/zos_job_submit.py index 6bbd0f9d9..8e06c340b 100644 --- a/plugins/action/zos_job_submit.py +++ b/plugins/action/zos_job_submit.py @@ -44,15 +44,15 @@ def run(self, tmp=None, task_vars=None): use_template = _process_boolean(module_args.get("use_template")) location = module_args.get("location") - if use_template and location != "LOCAL": + if use_template and location != "local": result.update(dict( failed=True, changed=False, - msg="Use of Jinja2 
templates is only valid for local files. Location is set to '{0}' but should be 'LOCAL'".format(location) + msg="Use of Jinja2 templates is only valid for local files. Location is set to '{0}' but should be 'local'".format(location) )) return result - if location == "LOCAL": + if location == "local": source = self._task.args.get("src", None) diff --git a/plugins/action/zos_unarchive.py b/plugins/action/zos_unarchive.py index 6e679d62d..ed508bcf0 100644 --- a/plugins/action/zos_unarchive.py +++ b/plugins/action/zos_unarchive.py @@ -87,11 +87,11 @@ def run(self, tmp=None, task_vars=None): ) dest = cmd_res.get("stdout") if dest_data_set.get("space_primary") is None: - dest_data_set.update(space_primary=5, space_type="M") + dest_data_set.update(space_primary=5, space_type="m") if format_name == 'terse': - dest_data_set.update(type='SEQ', record_format='FB', record_length=1024) + dest_data_set.update(type='seq', record_format='fb', record_length=1024) if format_name == 'xmit': - dest_data_set.update(type='SEQ', record_format='FB', record_length=80) + dest_data_set.update(type='seq', record_format='fb', record_length=80) copy_module_args.update( dict( diff --git a/plugins/module_utils/data_set.py b/plugins/module_utils/data_set.py index 3bd502858..40c1a4047 100644 --- a/plugins/module_utils/data_set.py +++ b/plugins/module_utils/data_set.py @@ -919,7 +919,7 @@ def _build_zoau_args(**kwargs): secondary += space_type type = kwargs.get("type") - if type and type == "ZFS": + if type and type.upper() == "ZFS": type = "LDS" volumes = ",".join(volumes) if volumes else None diff --git a/plugins/modules/zos_archive.py b/plugins/modules/zos_archive.py index 951b6bc87..cbe96b65d 100644 --- a/plugins/modules/zos_archive.py +++ b/plugins/modules/zos_archive.py @@ -81,8 +81,8 @@ type: str required: false choices: - - PACK - - SPACK + - pack + - spack xmit_log_data_set: description: - Provide the name of a data set to store xmit log output. 
@@ -193,9 +193,9 @@ - Organization of the destination type: str required: false - default: SEQ + default: seq choices: - - SEQ + - seq space_primary: description: - If the destination I(dest) data set does not exist , this sets the @@ -214,28 +214,28 @@ description: - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false record_format: description: - If the destination data set does not exist, this sets the format of the data set. (e.g C(FB)) - - Choices are case-insensitive. + - Choices are case-sensitive. required: false choices: - - FB - - VB - - FBA - - VBA - - U + - fb + - vb + - fba + - vba + - u type: str record_length: description: @@ -356,7 +356,7 @@ format: name: terse format_options: - terse_pack: "SPACK" + terse_pack: "spack" use_adrdssu: True # Use a pattern to store @@ -795,17 +795,17 @@ def _create_dest_data_set( arguments.update(name=temp_ds) if record_format is None: - arguments.update(record_format="FB") + arguments.update(record_format="fb") if record_length is None: arguments.update(record_length=80) if type is None: - arguments.update(type="SEQ") + arguments.update(type="seq") if space_primary is None: arguments.update(space_primary=5) if space_secondary is None: arguments.update(space_secondary=3) if space_type is None: - arguments.update(space_type="M") + arguments.update(space_type="m") arguments.pop("self") changed = data_set.DataSet.ensure_present(**arguments) return arguments["name"], changed @@ -819,8 +819,8 @@ def create_dest_ds(self, name): name {str} - name of the newly created data set. 
""" record_length = XMIT_RECORD_LENGTH if self.format == "xmit" else AMATERSE_RECORD_LENGTH - data_set.DataSet.ensure_present(name=name, replace=True, type='SEQ', record_format='FB', record_length=record_length) - # changed = data_set.DataSet.ensure_present(name=name, replace=True, type='SEQ', record_format='FB', record_length=record_length) + data_set.DataSet.ensure_present(name=name, replace=True, type='seq', record_format='fb', record_length=record_length) + # changed = data_set.DataSet.ensure_present(name=name, replace=True, type='seq', record_format='fb', record_length=record_length) # cmd = "dtouch -rfb -tseq -l{0} {1}".format(record_length, name) # rc, out, err = self.module.run_command(cmd) @@ -952,15 +952,19 @@ def compute_dest_size(self): dest_space += int(ds.total_space) # space unit returned from listings is bytes dest_space = math.ceil(dest_space / 1024) - self.dest_data_set.update(space_primary=dest_space, space_type="K") + self.dest_data_set.update(space_primary=dest_space, space_type="k") class AMATerseArchive(MVSArchive): def __init__(self, module): super(AMATerseArchive, self).__init__(module) self.pack_arg = module.params.get("format").get("format_options").get("terse_pack") + # We store pack_ard in uppercase because the AMATerse command requires + # it in uppercase. 
if self.pack_arg is None: self.pack_arg = "SPACK" + else: + self.pack_arg = self.pack_arg.upper() def add(self, src, archive): """ @@ -987,8 +991,8 @@ def archive_targets(self): """ if self.use_adrdssu: source, changed = self._create_dest_data_set( - type="SEQ", - record_format="U", + type="seq", + record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True, @@ -1006,8 +1010,8 @@ def archive_targets(self): dest, changed = self._create_dest_data_set( name=self.dest, replace=True, - type='SEQ', - record_format='FB', + type='seq', + record_format='fb', record_length=AMATERSE_RECORD_LENGTH, space_primary=self.dest_data_set.get("space_primary"), space_type=self.dest_data_set.get("space_type")) @@ -1056,8 +1060,8 @@ def archive_targets(self): """ if self.use_adrdssu: source, changed = self._create_dest_data_set( - type="SEQ", - record_format="U", + type="seq", + record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True, @@ -1075,8 +1079,8 @@ def archive_targets(self): dest, changed = self._create_dest_data_set( name=self.dest, replace=True, - type='SEQ', - record_format='FB', + type='seq', + record_format='fb', record_length=XMIT_RECORD_LENGTH, space_primary=self.dest_data_set.get("space_primary"), space_type=self.dest_data_set.get("space_type")) @@ -1137,7 +1141,7 @@ def run_module(): options=dict( terse_pack=dict( type='str', - choices=['PACK', 'SPACK'], + choices=['pack', 'spack'], ), xmit_log_data_set=dict( type='str', @@ -1163,9 +1167,9 @@ def run_module(): ), type=dict( type='str', - choices=['SEQ'], + choices=['seq'], required=False, - default="SEQ", + default="seq", ), space_primary=dict( type='int', required=False), @@ -1173,12 +1177,12 @@ def run_module(): type='int', required=False), space_type=dict( type='str', - choices=['K', 'M', 'G', 'CYL', 'TRK'], + choices=['k', 'm', 'g', 'cyl', 'trk'], required=False, ), record_format=dict( type='str', - choices=["FB", "VB", "FBA", "VBA", "U"], + choices=["fb", "vb", "fba", "vba", "u"], 
required=False ), record_length=dict(type='int', required=False), @@ -1214,7 +1218,7 @@ def run_module(): terse_pack=dict( type='str', required=False, - choices=['PACK', 'SPACK'], + choices=['pack', 'spack'], ), xmit_log_data_set=dict( type='str', @@ -1226,7 +1230,7 @@ def run_module(): ) ), default=dict( - terse_pack="SPACK", + terse_pack="spack", xmit_log_data_set="", use_adrdssu=False), ), @@ -1234,7 +1238,7 @@ def run_module(): default=dict( name="", format_options=dict( - terse_pack="SPACK", + terse_pack="spack", xmit_log_data_set="", use_adrdssu=False ) @@ -1249,7 +1253,7 @@ def run_module(): required=False, options=dict( name=dict(arg_type='str', required=False), - type=dict(arg_type='str', required=False, default="SEQ"), + type=dict(arg_type='str', required=False, default="seq"), space_primary=dict(arg_type='int', required=False), space_secondary=dict( arg_type='int', required=False), diff --git a/plugins/modules/zos_backup_restore.py b/plugins/modules/zos_backup_restore.py index 3185652e1..a112da247 100644 --- a/plugins/modules/zos_backup_restore.py +++ b/plugins/modules/zos_backup_restore.py @@ -168,15 +168,15 @@ space_type: description: - The unit of measurement to use when defining data set space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). - - When I(full_volume=True), I(space_type) defaults to C(G), otherwise default is C(M) + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). 
+ - When I(full_volume=True), I(space_type) defaults to C(g), otherwise default is C(m) type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false aliases: - unit @@ -233,7 +233,7 @@ include: user.** backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup all datasets matching the pattern USER.** that are present on the volume MYVOL1 to data set MY.BACKUP.DZP, @@ -245,7 +245,7 @@ volume: MYVOL1 backup_name: MY.BACKUP.DZP space: 100 - space_type: M + space_type: m - name: Backup an entire volume, MYVOL1, to the UNIX file /tmp/temp_backup.dzp, allocate 1GB for data sets used in backup process. @@ -255,7 +255,7 @@ volume: MYVOL1 full_volume: yes space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. Use z/OS username as new HLQ. @@ -299,7 +299,7 @@ full_volume: yes backup_name: MY.BACKUP.DZP space: 1 - space_type: G + space_type: g - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. Specify DB2SMS10 for the SMS storage and management classes to use for the restored @@ -346,7 +346,7 @@ def main(): ), ), space=dict(type="int", required=False, aliases=["size"]), - space_type=dict(type="str", required=False, aliases=["unit"], choices=["K", "M", "G", "CYL", "TRK"]), + space_type=dict(type="str", required=False, aliases=["unit"], choices=["k", "m", "g", "cyl", "trk"]), volume=dict(type="str", required=False), full_volume=dict(type="bool", default=False), temp_volume=dict(type="str", required=False, aliases=["dest_volume"]), @@ -709,12 +709,12 @@ def space_type_type(contents, dependencies): """ if contents is None: if dependencies.get("full_volume"): - return "G" + return "g" else: - return "M" - if not match(r"^(M|G|K|TRK|CYL)$", contents, IGNORECASE): + return "m" + if not match(r"^(m|g|k|trk|cyl)$", contents, IGNORECASE): raise ValueError( - 'Value {0} is invalid for space_type argument. 
Valid space types are "K", "M", "G", "TRK" or "CYL".'.format( + 'Value {0} is invalid for space_type argument. Valid space types are "k", "m", "g", "trk" or "cyl".'.format( contents ) ) diff --git a/plugins/modules/zos_copy.py b/plugins/modules/zos_copy.py index 9acb3c1c6..da29f688a 100644 --- a/plugins/modules/zos_copy.py +++ b/plugins/modules/zos_copy.py @@ -347,16 +347,16 @@ type: str required: true choices: - - KSDS - - ESDS - - RRDS - - LDS - - SEQ - - PDS - - PDSE - - MEMBER - - BASIC - - LIBRARY + - ksds + - esds + - rrds + - lds + - seq + - pds + - pdse + - member + - basic + - library space_primary: description: - If the destination I(dest) data set does not exist , this sets the @@ -375,27 +375,27 @@ description: - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false record_format: description: - If the destination data set does not exist, this sets the format of the - data set. (e.g C(FB)) - - Choices are case-insensitive. + data set. (e.g C(fb)) + - Choices are case-sensitive. required: false choices: - - FB - - VB - - FBA - - VBA - - U + - fb + - vb + - fba + - vba + - u type: str record_length: description: @@ -417,15 +417,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. - - I(key_length) is required when I(type=KSDS). 
- - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). + - I(key_length) should only be provided when I(type=ksds) type: int required: false sms_storage_class: @@ -642,11 +642,11 @@ remote_src: true volume: '222222' dest_data_set: - type: SEQ + type: seq space_primary: 10 space_secondary: 3 - space_type: K - record_format: VB + space_type: k + record_format: vb record_length: 150 - name: Copy a Program Object and its aliases on a remote system to a new PDSE member MYCOBOL @@ -702,7 +702,7 @@ description: Record format of the dataset. type: str - sample: FB + sample: fb record_length: description: Record length of the dataset. @@ -722,21 +722,21 @@ description: Unit of measurement for space. type: str - sample: K + sample: k type: description: Type of dataset allocated. type: str - sample: PDSE + sample: pdse sample: { "block_size": 32760, - "record_format": "FB", + "record_format": "fb", "record_length": 45, "space_primary": 2, "space_secondary": 1, - "space_type": "K", - "type": "PDSE" + "space_type": "k", + "type": "pdse" } checksum: description: SHA256 checksum of the file after running zos_copy. @@ -2802,7 +2802,7 @@ def run_module(module, arg_def): # dest_data_set.type overrides `dest_ds_type` given precedence rules if dest_data_set and dest_data_set.get("type"): - dest_ds_type = dest_data_set.get("type") + dest_ds_type = dest_data_set.get("type").upper() elif executable: """ When executable is selected and dest_exists is false means an executable PDSE was copied to remote, so we need to provide the correct dest_ds_type that will later be transformed into LIBRARY. 
@@ -2810,16 +2810,7 @@ def run_module(module, arg_def): and LIBRARY is not in MVS_PARTITIONED frozen set.""" dest_ds_type = "PDSE" - if dest_data_set and (dest_data_set.get('record_format', '') == 'FBA' or dest_data_set.get('record_format', '') == 'VBA'): - dest_has_asa_chars = True - elif not dest_exists and asa_text: - dest_has_asa_chars = True - elif dest_exists and dest_ds_type not in data_set.DataSet.MVS_VSAM: - dest_attributes = datasets.list_datasets(dest_name)[0] - if dest_attributes.record_format == 'FBA' or dest_attributes.record_format == 'VBA': - dest_has_asa_chars = True - - if dest_data_set and (dest_data_set.get('record_format', '') == 'FBA' or dest_data_set.get('record_format', '') == 'VBA'): + if dest_data_set and (dest_data_set.get('record_format', '') == 'fba' or dest_data_set.get('record_format', '') == 'vba'): dest_has_asa_chars = True elif not dest_exists and asa_text: dest_has_asa_chars = True @@ -3177,8 +3168,8 @@ def main(): options=dict( type=dict( type='str', - choices=['BASIC', 'KSDS', 'ESDS', 'RRDS', - 'LDS', 'SEQ', 'PDS', 'PDSE', 'MEMBER', 'LIBRARY'], + choices=['basic', 'ksds', 'esds', 'rrds', + 'lds', 'seq', 'pds', 'pdse', 'member', 'library'], required=True, ), space_primary=dict( @@ -3187,12 +3178,12 @@ def main(): type='int', required=False), space_type=dict( type='str', - choices=['K', 'M', 'G', 'CYL', 'TRK'], + choices=['k', 'm', 'g', 'cyl', 'trk'], required=False, ), record_format=dict( type='str', - choices=["FB", "VB", "FBA", "VBA", "U"], + choices=["fb", "vb", "fba", "vba", "u"], required=False ), record_length=dict(type='int', required=False), diff --git a/plugins/modules/zos_data_set.py b/plugins/modules/zos_data_set.py index 1969462c3..446fd6fe7 100644 --- a/plugins/modules/zos_data_set.py +++ b/plugins/modules/zos_data_set.py @@ -33,7 +33,7 @@ - The name of the data set being managed. 
(e.g C(USER.TEST)) - If I(name) is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - - Required if I(type=MEMBER) or I(state!=present) and not using I(batch). + - Required if I(type=member) or I(state!=present) and not using I(batch). type: str required: false state: @@ -46,7 +46,7 @@ If I(state=absent) and the data set does exist on the managed node, remove the data set, module completes successfully with I(changed=True). - > - If I(state=absent) and I(type=MEMBER) and I(force=True), the data set + If I(state=absent) and I(type=member) and I(force=True), the data set will be opened with I(DISP=SHR) such that the entire data set can be accessed by other processes while the specified member is deleted. - > @@ -77,7 +77,7 @@ If I(state=present) and I(replace=False) and the data set is present on the managed node, no action taken, module completes successfully with I(changed=False). - > - If I(state=present) and I(type=MEMBER) and the member does not exist in the data set, + If I(state=present) and I(type=member) and the member does not exist in the data set, create a member formatted to store data, module completes successfully with I(changed=True). Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, @@ -109,26 +109,26 @@ - uncataloged type: description: - - The data set type to be used when creating a data set. (e.g C(pdse)) - - C(MEMBER) expects to be used with an existing partitioned data set. + - The data set type to be used when creating a data set. (e.g C(pdse)). + - C(member) expects to be used with an existing partitioned data set. - Choices are case-sensitive. 
required: false type: str choices: - - KSDS - - ESDS - - RRDS - - LDS - - SEQ - - PDS - - PDSE - - LIBRARY - - BASIC - - LARGE - - MEMBER - - HFS - - ZFS - default: PDS + - ksds + - esds + - rrds + - lds + - seq + - pds + - pdse + - library + - basic + - large + - member + - hfs + - zfs + default: pds space_primary: description: - The amount of primary space to allocate for the dataset. @@ -146,33 +146,33 @@ space_type: description: - The unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false - default: M + default: m record_format: description: - The format of the data set. (e.g C(FB)) - Choices are case-sensitive. - - When I(type=KSDS), I(type=ESDS), I(type=RRDS), I(type=LDS) or I(type=ZFS) + - When I(type=ksds), I(type=esds), I(type=rrds), I(type=lds) or I(type=zfs) then I(record_format=None), these types do not have a default I(record_format). required: false choices: - - FB - - VB - - FBA - - VBA - - U - - F + - fb + - vb + - fba + - vba + - u + - f type: str - default: FB + default: fb aliases: - format sms_storage_class: @@ -221,15 +221,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. - - I(key_length) is required when I(type=KSDS). - - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). 
+ - I(key_length) should only be provided when I(type=ksds) type: int required: false volumes: @@ -281,7 +281,7 @@ - The I(force=True) option enables sharing of data sets through the disposition I(DISP=SHR). - The I(force=True) only applies to data set members when I(state=absent) - and I(type=MEMBER). + and I(type=member). type: bool required: false default: false @@ -297,7 +297,7 @@ - The name of the data set being managed. (e.g C(USER.TEST)) - If I(name) is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - - Required if I(type=MEMBER) or I(state!=present) + - Required if I(type=member) or I(state!=present) type: str required: false state: @@ -310,7 +310,7 @@ If I(state=absent) and the data set does exist on the managed node, remove the data set, module completes successfully with I(changed=True). - > - If I(state=absent) and I(type=MEMBER) and I(force=True), the data + If I(state=absent) and I(type=member) and I(force=True), the data set will be opened with I(DISP=SHR) such that the entire data set can be accessed by other processes while the specified member is deleted. @@ -342,7 +342,7 @@ If I(state=present) and I(replace=False) and the data set is present on the managed node, no action taken, module completes successfully with I(changed=False). - > - If I(state=present) and I(type=MEMBER) and the member does not exist in the data set, + If I(state=present) and I(type=member) and the member does not exist in the data set, create a member formatted to store data, module completes successfully with I(changed=True). Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, @@ -374,26 +374,26 @@ - uncataloged type: description: - - The data set type to be used when creating a data set. (e.g C(PDSE)) - - C(MEMBER) expects to be used with an existing partitioned data set. 
+ - The data set type to be used when creating a data set. (e.g C(pdse)) + - C(member) expects to be used with an existing partitioned data set. - Choices are case-sensitive. required: false type: str choices: - - KSDS - - ESDS - - RRDS - - LDS - - SEQ - - PDS - - PDSE - - LIBRARY - - BASIC - - LARGE - - MEMBER - - HFS - - ZFS - default: PDS + - ksds + - esds + - rrds + - lds + - seq + - pds + - pdse + - library + - basic + - large + - member + - hfs + - zfs + default: pds space_primary: description: - The amount of primary space to allocate for the dataset. @@ -411,33 +411,33 @@ space_type: description: - The unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false - default: M + default: m record_format: description: - The format of the data set. (e.g C(FB)) - Choices are case-sensitive. - - When I(type=KSDS), I(type=ESDS), I(type=RRDS), I(type=LDS) or - I(type=ZFS) then I(record_format=None), these types do not have a + - When I(type=ksds), I(type=esds), I(type=rrds), I(type=lds) or + I(type=zfs) then I(record_format=None), these types do not have a default I(record_format). required: false choices: - - FB - - VB - - FBA - - VBA - - U - - F + - fb + - vb + - fba + - vba + - u + - f type: str - default: FB + default: fb aliases: - format sms_storage_class: @@ -486,15 +486,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. 
- - I(key_length) is required when I(type=KSDS). - - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). + - I(key_length) should only be provided when I(type=ksds) type: int required: false volumes: @@ -539,7 +539,7 @@ - The I(force=True) option enables sharing of data sets through the disposition I(DISP=SHR). - The I(force=True) only applies to data set members when - I(state=absent) and I(type=MEMBER). + I(state=absent) and I(type=member). type: bool required: false default: false @@ -549,7 +549,7 @@ - name: Create a sequential data set if it does not exist zos_data_set: name: someds.name.here - type: SEQ + type: seq state: present - name: Create a PDS data set if it does not exist @@ -557,27 +557,27 @@ name: someds.name.here type: pds space_primary: 5 - space_type: M - record_format: FBA + space_type: m + record_format: fba record_length: 25 - name: Attempt to replace a data set if it exists zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 replace: yes - name: Attempt to replace a data set if it exists. If not found in the catalog, check if it is available on volume 222222, and catalog if found. 
zos_data_set: name: someds.name.here - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: U + space_type: m + record_format: u record_length: 25 volumes: "222222" replace: yes @@ -585,19 +585,19 @@ - name: Create an ESDS data set if it does not exist zos_data_set: name: someds.name.here - type: ESDS + type: esds - name: Create a KSDS data set if it does not exist zos_data_set: name: someds.name.here - type: KSDS + type: ksds key_length: 8 key_offset: 0 - name: Create an RRDS data set with storage class MYDATA if it does not exist zos_data_set: name: someds.name.here - type: RRDS + type: rrds sms_storage_class: mydata - name: Delete a data set if it exists @@ -614,43 +614,43 @@ - name: Write a member to an existing PDS; replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member replace: yes - name: Write a member to an existing PDS; do not replace if member exists zos_data_set: name: someds.name.here(mydata) - type: MEMBER + type: member - name: Remove a member from an existing PDS zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member - name: Remove a member from an existing PDS/E by opening with disposition DISP=SHR zos_data_set: name: someds.name.here(mydata) state: absent - type: MEMBER + type: member force: yes - name: Create multiple partitioned data sets and add one or more members to each zos_data_set: batch: - name: someds.name.here1 - type: PDS + type: pds space_primary: 5 - space_type: M - record_format: FB + space_type: m + record_format: fb replace: yes - name: someds.name.here1(member1) - type: MEMBER + type: member - name: someds.name.here2(member1) - type: MEMBER + type: member replace: yes - name: someds.name.here2(member2) - type: MEMBER + type: member - name: Catalog a data set present on volume 222222 if it is uncataloged. 
zos_data_set: @@ -689,44 +689,44 @@ # CONSTANTS DATA_SET_TYPES = [ - "KSDS", - "ESDS", - "RRDS", - "LDS", - "SEQ", - "PDS", - "PDSE", - "BASIC", - "LARGE", - "LIBRARY", - "MEMBER", - "HFS", - "ZFS", + "ksds", + "esds", + "rrds", + "lds", + "seq", + "pds", + "pdse", + "basic", + "large", + "library", + "member", + "hfs", + "zfs", ] DATA_SET_FORMATS = [ - "FB", - "VB", - "FBA", - "VBA", - "U", - "F", + "fb", + "vb", + "fba", + "vba", + "u", + "f", ] DEFAULT_RECORD_LENGTHS = { - "FB": 80, - "FBA": 80, - "VB": 137, - "VBA": 137, - "U": 0, + "fb": 80, + "fba": 80, + "vb": 137, + "vba": 137, + "u": 0, } DATA_SET_TYPES_VSAM = [ - "KSDS", - "ESDS", - "RRDS", - "LDS", - "ZFS", + "ksds", + "esds", + "rrds", + "lds", + "zfs", ] # ------------- Functions to validate arguments ------------- # @@ -775,14 +775,14 @@ def data_set_name(contents, dependencies): if contents is None: if dependencies.get("state") != "present": raise ValueError('Data set name must be provided when "state!=present"') - if dependencies.get("type") != "MEMBER": + if dependencies.get("type") != "member": tmphlq = dependencies.get("tmp_hlq") if tmphlq is None: tmphlq = "" contents = DataSet.temp_name(tmphlq) else: raise ValueError( - 'Data set and member name must be provided when "type=MEMBER"' + 'Data set and member name must be provided when "type=member"' ) dsname = str(contents) if not re.fullmatch( @@ -796,7 +796,7 @@ def data_set_name(contents, dependencies): dsname, re.IGNORECASE, ) - and dependencies.get("type") == "MEMBER" + and dependencies.get("type") == "member" ): raise ValueError( "Value {0} is invalid for data set argument.".format(dsname) @@ -809,13 +809,13 @@ def space_type(contents, dependencies): """Validates provided data set unit of space is valid. 
Returns the unit of space.""" if dependencies.get("state") == "absent": - return "M" + return "m" if contents is None: return None - match = re.fullmatch(r"(M|G|K|TRK|CYL)", contents, re.IGNORECASE) + match = re.fullmatch(r"(m|g|k|trk|cyl)", contents, re.IGNORECASE) if not match: raise ValueError( - 'Value {0} is invalid for space_type argument. Valid space types are "K", "M", "G", "TRK" or "CYL".'.format( + 'Value {0} is invalid for space_type argument. Valid space types are "k", "m", "g", "trk" or "cyl".'.format( contents ) ) @@ -872,12 +872,11 @@ def record_length(contents, dependencies): # * dependent on state # * dependent on record_length def record_format(contents, dependencies): - """Validates data set format is valid. - Returns uppercase data set format.""" + """Validates data set format is valid.""" if dependencies.get("state") == "absent": - return "FB" + return "fb" if contents is None: - return "FB" + return "fb" formats = "|".join(DATA_SET_FORMATS) if not re.fullmatch(formats, contents, re.IGNORECASE): raise ValueError( @@ -885,17 +884,16 @@ def record_format(contents, dependencies): contents, ", ".join(DATA_SET_FORMATS) ) ) - return contents.upper() + return contents # * dependent on state def data_set_type(contents, dependencies): - """Validates data set type is valid. 
- Returns uppercase data set type.""" - # if dependencies.get("state") == "absent" and contents != "MEMBER": + """Validates data set type is valid.""" + # if dependencies.get("state") == "absent" and contents != "member": # return None if contents is None: - return "PDS" + return "pds" types = "|".join(DATA_SET_TYPES) if not re.fullmatch(types, contents, re.IGNORECASE): raise ValueError( @@ -903,7 +901,7 @@ def data_set_type(contents, dependencies): contents, ", ".join(DATA_SET_TYPES) ) ) - return contents.upper() + return contents # * dependent on state @@ -936,10 +934,10 @@ def key_length(contents, dependencies): Returns data set key length as integer.""" if dependencies.get("state") == "absent": return None - if dependencies.get("type") == "KSDS" and contents is None: + if dependencies.get("type") == "ksds" and contents is None: raise ValueError("key_length is required when requesting KSDS data set.") - if dependencies.get("type") != "KSDS" and contents is not None: - raise ValueError("key_length is only valid when type=KSDS.") + if dependencies.get("type") != "ksds" and contents is not None: + raise ValueError("key_length is only valid when type=ksds.") if contents is None: return None contents = int(contents) @@ -958,10 +956,10 @@ def key_offset(contents, dependencies): Returns data set key offset as integer.""" if dependencies.get("state") == "absent": return None - if dependencies.get("type") == "KSDS" and contents is None: + if dependencies.get("type") == "ksds" and contents is None: raise ValueError("key_offset is required when requesting KSDS data set.") - if dependencies.get("type") != "KSDS" and contents is not None: - raise ValueError("key_offset is only valid when type=KSDS.") + if dependencies.get("type") != "ksds" and contents is not None: + raise ValueError("key_offset is only valid when type=ksds.") if contents is None: return None contents = int(contents) @@ -981,13 +979,13 @@ def perform_data_set_operations(name, state, **extra_args): # passing 
in **extra_args forced me to modify the acceptable parameters # for multiple functions in data_set.py including ensure_present, replace # and create where the force parameter has no bearing. - if state == "present" and extra_args.get("type") != "MEMBER": + if state == "present" and extra_args.get("type") != "member": changed = DataSet.ensure_present(name, **extra_args) - elif state == "present" and extra_args.get("type") == "MEMBER": + elif state == "present" and extra_args.get("type") == "member": changed = DataSet.ensure_member_present(name, extra_args.get("replace")) - elif state == "absent" and extra_args.get("type") != "MEMBER": + elif state == "absent" and extra_args.get("type") != "member": changed = DataSet.ensure_absent(name, extra_args.get("volumes")) - elif state == "absent" and extra_args.get("type") == "MEMBER": + elif state == "absent" and extra_args.get("type") == "member": changed = DataSet.ensure_member_absent(name, extra_args.get("force")) elif state == "cataloged": changed = DataSet.ensure_cataloged(name, extra_args.get("volumes")) @@ -1024,8 +1022,8 @@ def parse_and_validate_args(params): type=space_type, required=False, dependencies=["state"], - choices=["K", "M", "G", "CYL", "TRK"], - default="M", + choices=["k", "m", "g", "cyl", "trk"], + default="m", ), space_primary=dict(type="int", required=False, dependencies=["state"]), space_secondary=dict( @@ -1035,9 +1033,9 @@ def parse_and_validate_args(params): type=record_format, required=False, dependencies=["state"], - choices=["FB", "VB", "FBA", "VBA", "U", "F"], + choices=["fb", "vb", "fba", "vba", "u", "f"], aliases=["format"], - default="FB", + default="fb", ), sms_management_class=dict( type=sms_class, required=False, dependencies=["state"] @@ -1113,8 +1111,8 @@ def parse_and_validate_args(params): type=space_type, required=False, dependencies=["state"], - choices=["K", "M", "G", "CYL", "TRK"], - default="M", + choices=["k", "m", "g", "cyl", "trk"], + default="m", ), 
space_primary=dict(type="int", required=False, dependencies=["state"]), space_secondary=dict(type="int", required=False, dependencies=["state"]), @@ -1122,9 +1120,9 @@ def parse_and_validate_args(params): type=record_format, required=False, dependencies=["state"], - choices=["FB", "VB", "FBA", "VBA", "U", "F"], + choices=["fb", "vb", "fba", "vba", "u", "f"], aliases=["format"], - default="FB", + default="fb", ), sms_management_class=dict( type=sms_class, required=False, dependencies=["state"] @@ -1224,14 +1222,14 @@ def run_module(): type=dict( type="str", required=False, - default="PDS", + default="pds", choices=DATA_SET_TYPES, ), space_type=dict( type="str", required=False, - default="M", - choices=["K", "M", "G", "CYL", "TRK"], + default="m", + choices=["k", "m", "g", "cyl", "trk"], ), space_primary=dict(type="int", required=False, default=5), space_secondary=dict(type="int", required=False, default=3), @@ -1239,8 +1237,8 @@ def run_module(): type="str", required=False, aliases=["format"], - default="FB", - choices=["FB", "VB", "FBA", "VBA", "U", "F"], + default="fb", + choices=["fb", "vb", "fba", "vba", "u", "f"], ), sms_management_class=dict(type="str", required=False), # I know this alias is odd, ZOAU used to document they supported @@ -1289,14 +1287,14 @@ def run_module(): type=dict( type="str", required=False, - default="PDS", + default="pds", choices=DATA_SET_TYPES, ), space_type=dict( type="str", required=False, - default="M", - choices=["K", "M", "G", "CYL", "TRK"], + default="m", + choices=["k", "m", "g", "cyl", "trk"], ), space_primary=dict(type="int", required=False, default=5), space_secondary=dict(type="int", required=False, default=3), @@ -1304,8 +1302,8 @@ def run_module(): type="str", required=False, aliases=["format"], - choices=["FB", "VB", "FBA", "VBA", "U", "F"], - default="FB" + choices=["fb", "vb", "fba", "vba", "u", "f"], + default="fb" ), sms_management_class=dict(type="str", required=False), # I know this alias is odd, ZOAU used to 
document they supported @@ -1357,7 +1355,7 @@ def run_module(): # This section is copied down inside if/check_mode false, so it modifies after the arg parser if module.params.get("batch") is not None: for entry in module.params.get("batch"): - if entry.get('type') is not None and entry.get("type").upper() in DATA_SET_TYPES_VSAM: + if entry.get('type') is not None and entry.get("type") in DATA_SET_TYPES_VSAM: entry["record_format"] = None if module.params.get("type") is not None: module.params["type"] = None @@ -1374,7 +1372,7 @@ def run_module(): if module.params.get("record_format") is not None: module.params["record_format"] = None elif module.params.get("type") is not None: - if module.params.get("type").upper() in DATA_SET_TYPES_VSAM: + if module.params.get("type") in DATA_SET_TYPES_VSAM: # For VSAM types set the value to nothing and let the code manage it # module.params["record_format"] = None if module.params.get("record_format") is not None: @@ -1394,7 +1392,7 @@ def run_module(): # This *appears* redundant, bit the parse_and_validate reinforces the default value for record_type if data_set_params.get("batch") is not None: for entry in data_set_params.get("batch"): - if entry.get('type') is not None and entry.get("type").upper() in DATA_SET_TYPES_VSAM: + if entry.get('type') is not None and entry.get("type") in DATA_SET_TYPES_VSAM: entry["record_format"] = None if data_set_params.get("type") is not None: data_set_params["type"] = None @@ -1411,7 +1409,7 @@ def run_module(): if data_set_params.get("record_format") is not None: data_set_params["record_format"] = None else: - if data_set_params.get("type").upper() in DATA_SET_TYPES_VSAM: + if data_set_params.get("type") in DATA_SET_TYPES_VSAM: if data_set_params.get("record_format") is not None: data_set_params["record_format"] = None diff --git a/plugins/modules/zos_job_submit.py b/plugins/modules/zos_job_submit.py index 7c66c2543..1b56f459d 100644 --- a/plugins/modules/zos_job_submit.py +++ 
b/plugins/modules/zos_job_submit.py @@ -42,17 +42,17 @@ (e.g "/User/tester/ansible-playbook/sample.jcl") location: required: false - default: DATA_SET + default: data_set type: str choices: - - DATA_SET - - USS - - LOCAL + - data_set + - uss + - local description: - - The JCL location. Supported choices are ``DATA_SET``, ``USS`` or ``LOCAL``. - - DATA_SET can be a PDS, PDSE, or sequential data set. - - USS means the JCL location is located in UNIX System Services (USS). - - LOCAL means locally to the ansible control node. + - The JCL location. Supported choices are C(data_set), C(uss) or C(local). + - C(data_set) can be a PDS, PDSE, or sequential data set. + - C(uss) means the JCL location is located in UNIX System Services (USS). + - C(local) means locally to the ansible control node. wait_time_s: required: false default: 10 @@ -80,17 +80,17 @@ required: false type: str description: - - The volume serial (VOLSER)is where the data set resides. The option + - The volume serial (VOLSER) is where the data set resides. The option is required only when the data set is not cataloged on the system. - When configured, the L(zos_job_submit,./zos_job_submit.html) will try to catalog the data set for the volume serial. If it is not able to, the module will fail. - - Ignored for I(location=USS) and I(location=LOCAL). + - Ignored for I(location=uss) and I(location=local). encoding: description: - Specifies which encoding the local JCL file should be converted from and to, before submitting the job. - - This option is only supported for when I(location=LOCAL). + - This option is only supported for when I(location=local). - If this parameter is not provided, and the z/OS systems default encoding can not be identified, the JCL file will be converted from UTF-8 to IBM-1047 by default, otherwise the module will detect the z/OS system @@ -561,19 +561,19 @@ - name: Submit JCL in a PDSE member. 
zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set register: response - name: Submit JCL in USS with no DDs in the output. zos_job_submit: src: /u/tester/demo/sample.jcl - location: USS + location: uss return_output: false - name: Convert local JCL to IBM-037 and submit the job. zos_job_submit: src: /Users/maxy/ansible-playbooks/provision/sample.jcl - location: LOCAL + location: local encoding: from: ISO8859-1 to: IBM-037 @@ -581,25 +581,25 @@ - name: Submit JCL in an uncataloged PDSE on volume P2SS01. zos_job_submit: src: HLQ.DATA.LLQ(SAMPLE) - location: DATA_SET + location: data_set volume: P2SS01 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit a long running PDS job and wait up to 30 seconds for completion. zos_job_submit: src: HLQ.DATA.LLQ(LONGRUN) - location: DATA_SET + location: data_set wait_time_s: 30 - name: Submit JCL and set the max return code the module should fail on to 16. 
zos_job_submit: src: HLQ.DATA.LLQ - location: DATA_SET + location: data_set max_rc: 16 """ @@ -805,8 +805,8 @@ def run_module(): src=dict(type="str", required=True), location=dict( type="str", - default="DATA_SET", - choices=["DATA_SET", "USS", "LOCAL"], + default="data_set", + choices=["data_set", "uss", "local"], ), encoding=dict( type="dict", @@ -875,8 +875,8 @@ def run_module(): src=dict(arg_type="data_set_or_path", required=True), location=dict( arg_type="str", - default="DATA_SET", - choices=["DATA_SET", "USS", "LOCAL"], + default="data_set", + choices=["data_set", "uss", "local"], ), from_encoding=dict( arg_type="encoding", default=Defaults.DEFAULT_ASCII_CHARSET, required=False), @@ -907,7 +907,7 @@ def run_module(): return_output = parsed_args.get("return_output") wait_time_s = parsed_args.get("wait_time_s") max_rc = parsed_args.get("max_rc") - temp_file = parsed_args.get("src") if location == "LOCAL" else None + temp_file = parsed_args.get("src") if location == "local" else None # Default 'changed' is False in case the module is not able to execute result = dict(changed=False) @@ -921,13 +921,13 @@ def run_module(): job_submitted_id = None duration = 0 start_time = timer() - if location == "DATA_SET": + if location == "data_set": job_submitted_id, duration = submit_src_jcl( module, src, src_name=src, timeout=wait_time_s, is_unix=False, volume=volume, start_time=start_time) - elif location == "USS": + elif location == "uss": job_submitted_id, duration = submit_src_jcl( module, src, src_name=src, timeout=wait_time_s, is_unix=True) - elif location == "LOCAL": + elif location == "local": job_submitted_id, duration = submit_src_jcl( module, src, src_name=src, timeout=wait_time_s, is_unix=True) diff --git a/plugins/modules/zos_mount.py b/plugins/modules/zos_mount.py index 3f4c642f3..61ca20b9f 100644 --- a/plugins/modules/zos_mount.py +++ b/plugins/modules/zos_mount.py @@ -48,13 +48,13 @@ description: - The type of file system that will be mounted. 
- The physical file systems data set format to perform the logical mount. - - The I(fs_type) is required to be uppercase. + - The I(fs_type) is required to be lowercase. type: str choices: - - HFS - - ZFS - - NFS - - TFS + - hfs + - zfs + - nfs + - tfs required: True state: description: @@ -168,33 +168,33 @@ file hierarchy). type: str choices: - - DRAIN - - FORCE - - IMMEDIATE - - NORMAL - - REMOUNT - - RESET + - drain + - force + - immediate + - normal + - remount + - reset required: False - default: NORMAL + default: normal mount_opts: description: - Options available to the mount. - - If I(mount_opts=RO) on a mounted/remount, mount is performed + - If I(mount_opts=ro) on a mounted/remount, mount is performed read-only. - - If I(mount_opts=SAME) and (unmount_opts=REMOUNT), mount is opened + - If I(mount_opts=same) and (unmount_opts=remount), mount is opened in the same mode as previously opened. - - If I(mount_opts=NOWAIT), mount is performed asynchronously. - - If I(mount_opts=NOSECURITY), security checks are not enforced for + - If I(mount_opts=nowait), mount is performed asynchronously. + - If I(mount_opts=nosecurity), security checks are not enforced for files in this file system. type: str choices: - - RO - - RW - - SAME - - NOWAIT - - NOSECURITY + - ro + - rw + - same + - nowait + - nosecurity required: False - default: RW + default: rw src_params: description: - Specifies a parameter string to be passed to the file system type. @@ -206,15 +206,15 @@ description: - If present, tags get written to any untagged file. - When the file system is unmounted, the tags are lost. - - If I(tag_untagged=NOTEXT) none of the untagged files in the file system are + - If I(tag_untagged=notext) none of the untagged files in the file system are automatically converted during file reading and writing. 
- - If I(tag_untagged=TEXT) each untagged file is implicitly marked as + - If I(tag_untagged=text) each untagged file is implicitly marked as containing pure text data that can be converted. - If this flag is used, use of tag_ccsid is encouraged. type: str choices: - - TEXT - - NOTEXT + - text + - notext required: False tag_ccsid: description: @@ -271,23 +271,23 @@ AUTOMOVE where the file system will be randomly moved to another system (no system list used). - > - I(automove=AUTOMOVE) indicates that ownership of the file system can be + I(automove=automove) indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. - > - I(automove=NOAUTOMOVE) prevents movement of the file system's ownership in some situations. + I(automove=noautomove) prevents movement of the file system's ownership in some situations. - > - I(automove=UNMOUNT) allows the file system to be unmounted in some situations. + I(automove=unmount) allows the file system to be unmounted in some situations. type: str choices: - - AUTOMOVE - - NOAUTOMOVE - - UNMOUNT + - automove + - noautomove + - unmount required: False - default: AUTOMOVE + default: automove automove_list: description: - > - If(automove=AUTOMOVE), this option will be checked. + If(automove=automove), this option will be checked. - > This specifies the list of servers to include or exclude as destinations. - > @@ -317,14 +317,14 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted - name: Unmount a filesystem. 
zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: unmounted unmount_opts: REMOUNT opts: same @@ -333,7 +333,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted mount_opts: RO @@ -341,7 +341,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted persistent: data_store: SYS1.PARMLIB(BPXPRMAA) @@ -351,7 +351,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted persistent: data_store: SYS1.PARMLIB(BPXPRMAA) @@ -363,7 +363,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted allow_uid: no @@ -371,7 +371,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted opts: nowait @@ -379,7 +379,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted mount_opts: NOSECURITY @@ -387,7 +387,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted automove: AUTOMOVE automove_list: I,DEV1,DEV2,DEV3,DEV9 @@ -396,7 +396,7 @@ zos_mount: src: SOMEUSER.VVV.ZFS path: /u/omvsadm/core - fs_type: ZFS + fs_type: zfs state: mounted automove: AUTOMOVE automove_list: EXCLUDE,DEV4,DEV5,DEV6,DEV7 @@ -854,7 +854,7 @@ def run_module(module, arg_def): src, path, fs_type ) ) - if "RO" in mount_opts: + if "ro" in mount_opts: subcmd = "READ" else: subcmd = "RDWR" @@ -882,14 +882,14 @@ def run_module(module, arg_def): fullcmd = fullcmd + " NOSETUID" parmtext = parmtext + "\n NOSETUID" - if "NOWAIT" in mount_opts: + if "nowait" in mount_opts: fullcmd = fullcmd + " NOWAIT" parmtext = parmtext + "\n NOWAIT" else: fullcmd = fullcmd + " WAIT" parmtext = parmtext + "\n WAIT" - if "NOSECURITY" in mount_opts: + if "nosecurity" in mount_opts: fullcmd = fullcmd + " NOSECURITY" parmtext = parmtext + "\n NOSECURITY" else: @@ 
-1051,10 +1051,10 @@ def main(): fs_type=dict( type="str", choices=[ - "HFS", - "ZFS", - "NFS", - "TFS", + "hfs", + "zfs", + "nfs", + "tfs", ], required=True, ), @@ -1079,27 +1079,27 @@ def main(): ), unmount_opts=dict( type="str", - default="NORMAL", - choices=["DRAIN", "FORCE", "IMMEDIATE", "NORMAL", "REMOUNT", "RESET"], + default="normal", + choices=["drain", "force", "immediate", "normal", "remount", "reset"], required=False, ), mount_opts=dict( type="str", - default="RW", - choices=["RO", "RW", "SAME", "NOWAIT", "NOSECURITY"], + default="rw", + choices=["ro", "rw", "same", "nowait", "nosecurity"], required=False, ), src_params=dict(type="str", required=False), tag_untagged=dict( - type="str", choices=["TEXT", "NOTEXT"], required=False + type="str", choices=["text", "notext"], required=False ), tag_ccsid=dict(type="int", required=False), allow_uid=dict(type="bool", default=True, required=False), sysname=dict(type="str", required=False), automove=dict( type="str", - default="AUTOMOVE", - choices=["AUTOMOVE", "NOAUTOMOVE", "UNMOUNT"], + default="automove", + choices=["automove", "noautomove", "unmount"], required=False, ), automove_list=dict(type="str", required=False), @@ -1114,10 +1114,10 @@ def main(): fs_type=dict( arg_type="str", choices=[ - "HFS", - "ZFS", - "NFS", - "TFS", + "hfs", + "zfs", + "nfs", + "tfs", ], required=True, ), @@ -1139,27 +1139,27 @@ def main(): ), unmount_opts=dict( arg_type="str", - default="NORMAL", - choices=["DRAIN", "FORCE", "IMMEDIATE", "NORMAL", "REMOUNT", "RESET"], + default="normal", + choices=["drain", "force", "immediate", "normal", "remount", "reset"], required=False, ), mount_opts=dict( arg_type="str", - default="RW", - choices=["RO", "RW", "SAME", "NOWAIT", "NOSECURITY"], + default="rw", + choices=["ro", "rw", "same", "nowait", "nosecurity"], required=False, ), src_params=dict(arg_type="str", default="", required=False), tag_untagged=dict( - arg_type="str", choices=["TEXT", "NOTEXT"], required=False + arg_type="str", 
choices=["text", "notext"], required=False ), tag_ccsid=dict(arg_type="int", required=False), allow_uid=dict(arg_type="bool", default=True, required=False), sysname=dict(arg_type="str", default="", required=False), automove=dict( arg_type="str", - default="AUTOMOVE", - choices=["AUTOMOVE", "NOAUTOMOVE", "UNMOUNT"], + default="automove", + choices=["automove", "noautomove", "unmount"], required=False, ), automove_list=dict(arg_type="str", default="", required=False), diff --git a/plugins/modules/zos_mvs_raw.py b/plugins/modules/zos_mvs_raw.py index a440c31c6..bcac50a63 100644 --- a/plugins/modules/zos_mvs_raw.py +++ b/plugins/modules/zos_mvs_raw.py @@ -96,16 +96,16 @@ - Maps to DSNTYPE on z/OS. type: str choices: - - LIBRARY - - PDS - - PDSE - - LARGE - - BASIC - - SEQ - - RRDS - - ESDS - - LDS - - KSDS + - library + - pds + - pdse + - large + - basic + - seq + - rrds + - esds + - lds + - ksds disposition: description: - I(disposition) indicates the status of a data set. @@ -125,9 +125,7 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog disposition_abnormal: description: @@ -138,32 +136,30 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog reuse: description: - - Determines if a data set should be reused if I(disposition=NEW) and if a data set with a matching name already exists. + - Determines if a data set should be reused if I(disposition=new) and if a data set with a matching name already exists. - If I(reuse=true), I(disposition) will be automatically switched to C(SHR). - If I(reuse=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(replace). - - I(reuse) is only considered when I(disposition=NEW) + - I(reuse) is only considered when I(disposition=new) type: bool default: false replace: description: - - Determines if a data set should be replaced if I(disposition=NEW) and a data set with a matching name already exists. 
+ - Determines if a data set should be replaced if I(disposition=new) and a data set with a matching name already exists. - If I(replace=true), the original data set will be deleted, and a new data set created. - If I(replace=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(reuse). - - I(replace) is only considered when I(disposition=NEW) + - I(replace) is only considered when I(disposition=new) - I(replace) will result in loss of all data in the original data set unless I(backup) is specified. type: bool default: false backup: description: - - Determines if a backup should be made of an existing data set when I(disposition=NEW), I(replace=true), + - Determines if a backup should be made of an existing data set when I(disposition=new), I(replace=true), and a data set with the desired name is found. - I(backup) is only used when I(replace=true). type: bool @@ -174,12 +170,12 @@ using I(space_primary) and I(space_secondary). type: str choices: - - TRK - - CYL - - B - - K - - M - - G + - trk + - cyl + - b + - k + - m + - g space_primary: description: - The primary amount of space to allocate for a new data set. @@ -260,8 +256,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. - - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD1 on z/OS. type: str required: true @@ -289,8 +285,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. - - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD2 on z/OS. type: str required: true @@ -316,7 +312,7 @@ - The logical record length. (e.g C(80)). 
- For variable data sets, the length must include the 4-byte prefix area. - "Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0." - - Valid values are (1-32760 for non-vsam, 1-32761 for vsam). + - Valid values are (1-32760 for non-VSAM, 1-32761 for VSAM). - Maps to LRECL on z/OS. type: int required: false @@ -325,11 +321,11 @@ - The format and characteristics of the records for new data set. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. @@ -505,11 +501,11 @@ a UNIX file would normally be treated as a stream of bytes. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. @@ -717,16 +713,16 @@ - Maps to DSNTYPE on z/OS. type: str choices: - - LIBRARY - - PDS - - PDSE - - LARGE - - BASIC - - SEQ - - RRDS - - ESDS - - LDS - - KSDS + - library + - pds + - pdse + - large + - basic + - seq + - rrds + - esds + - lds + - ksds disposition: description: - I(disposition) indicates the status of a data set. @@ -746,9 +742,7 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog disposition_abnormal: description: @@ -759,32 +753,30 @@ choices: - delete - keep - - catlg - catalog - - uncatlg - uncatalog reuse: description: - - Determines if data set should be reused if I(disposition=NEW) and a data set with matching name already exists. + - Determines if data set should be reused if I(disposition=new) and a data set with matching name already exists. - If I(reuse=true), I(disposition) will be automatically switched to C(SHR). - If I(reuse=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(replace). 
- - I(reuse) is only considered when I(disposition=NEW) + - I(reuse) is only considered when I(disposition=new) type: bool default: false replace: description: - - Determines if data set should be replaced if I(disposition=NEW) and a data set with matching name already exists. + - Determines if data set should be replaced if I(disposition=new) and a data set with matching name already exists. - If I(replace=true), the original data set will be deleted, and a new data set created. - If I(replace=false), and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with I(reuse). - - I(replace) is only considered when I(disposition=NEW) + - I(replace) is only considered when I(disposition=new) - I(replace) will result in loss of all data in the original data set unless I(backup) is specified. type: bool default: false backup: description: - - Determines if a backup should be made of existing data set when I(disposition=NEW), I(replace=true), + - Determines if a backup should be made of existing data set when I(disposition=new), I(replace=true), and a data set with the desired name is found. - I(backup) is only used when I(replace=true). type: bool @@ -795,12 +787,12 @@ using I(space_primary) and I(space_secondary). type: str choices: - - TRK - - CYL - - B - - K - - M - - G + - trk + - cyl + - b + - k + - m + - g space_primary: description: - The primary amount of space to allocate for a new data set. @@ -881,8 +873,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. - - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD1 on z/OS. type: str required: true @@ -910,8 +902,8 @@ description: - How the label for the key encrypting key specified by I(label) is encoded by the Encryption Key Manager. 
- - I(encoding) can either be set to C(L) for label encoding, - or C(H) for hash encoding. + - I(encoding) can either be set to C(l) for label encoding, + or C(h) for hash encoding. - Maps to KEYCD2 on z/OS. type: str required: true @@ -946,11 +938,11 @@ - The format and characteristics of the records for new data set. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. @@ -988,7 +980,7 @@ path: description: - The path to an existing UNIX file. - - Or provide the path to an new created UNIX file when I(status_group=OCREAT). + - Or provide the path to an new created UNIX file when I(status_group=ocreat). - The provided path must be absolute. required: true type: str @@ -1124,11 +1116,11 @@ a UNIX file would normally be treated as a stream of bytes. type: str choices: - - U - - VB - - VBA - - FB - - FBA + - u + - vb + - vba + - fb + - fba return_content: description: - Determines how content should be returned to the user. 
@@ -1300,13 +1292,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1324,13 +1316,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1369,13 +1361,13 @@ data_set_name: mypgm.output.ds disposition: new reuse: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1398,15 +1390,15 @@ disposition: new replace: yes backup: yes - type: SEQ + type: seq space_primary: 5 space_secondary: 1 - space_type: M + space_type: m volumes: - "000000" - "111111" - "SCR002" - record_format: FB + record_format: fb return_content: type: text - dd_input: @@ -1641,13 +1633,13 @@ def run_module(): disposition=dict(type="str", choices=["new", "shr", "mod", "old"]), disposition_normal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), disposition_abnormal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), - space_type=dict(type="str", choices=["TRK", "CYL", "B", "K", "M", "G"]), + space_type=dict(type="str", choices=["trk", "cyl", "b", "k", "m", "g"]), space_primary=dict(type="int"), space_secondary=dict(type="int"), volumes=dict(type="raw"), @@ -1660,16 +1652,16 @@ def run_module(): type=dict( type="str", choices=[ - "LIBRARY", - "PDS", - "PDSE", - "SEQ", - "BASIC", - "LARGE", - "KSDS", - "RRDS", - "LDS", - "ESDS", + "library", + "pds", + "pdse", 
+ "seq", + "basic", + "large", + "ksds", + "rrds", + "lds", + "esds", ], ), encryption_key_1=dict( @@ -1691,7 +1683,7 @@ def run_module(): key_length=dict(type="int", no_log=False), key_offset=dict(type="int", no_log=False), record_length=dict(type="int"), - record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -1766,7 +1758,7 @@ def run_module(): ), block_size=dict(type="int"), record_length=dict(type="int"), - record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -1884,13 +1876,13 @@ def parse_and_validate_args(params): disposition=dict(type="str", choices=["new", "shr", "mod", "old"]), disposition_normal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), disposition_abnormal=dict( type="str", - choices=["delete", "keep", "catalog", "uncatalog", "catlg", "uncatlg"], + choices=["delete", "keep", "catalog", "uncatalog"], ), - space_type=dict(type="str", choices=["TRK", "CYL", "B", "K", "M", "G"]), + space_type=dict(type="str", choices=["trk", "cyl", "b", "k", "m", "g"]), space_primary=dict(type="int"), space_secondary=dict(type="int"), volumes=dict(type=volumes), @@ -1903,16 +1895,16 @@ def parse_and_validate_args(params): type=dict( type="str", choices=[ - "LIBRARY", - "PDS", - "PDSE", - "SEQ", - "BASIC", - "LARGE", - "KSDS", - "RRDS", - "LDS", - "ESDS", + "library", + "pds", + "pdse", + "seq", + "basic", + "large", + "ksds", + "rrds", + "lds", + "esds", ], ), encryption_key_1=dict( @@ -1936,7 +1928,7 @@ def parse_and_validate_args(params): type=key_offset, default=key_offset_default, dependencies=["type"] ), record_length=dict(type="int"), - record_format=dict(type="str", 
choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -1992,7 +1984,7 @@ def parse_and_validate_args(params): ), block_size=dict(type="int"), record_length=dict(type="int"), - record_format=dict(type="str", choices=["U", "VB", "VBA", "FB", "FBA"]), + record_format=dict(type="str", choices=["u", "vb", "vba", "fb", "fba"]), return_content=dict( type="dict", options=dict( @@ -2084,8 +2076,8 @@ def key_length(contents, dependencies): """ if contents is None: return contents - if contents is not None and dependencies.get("type") != "KSDS": - raise ValueError('key_length is only valid when "type=KSDS".') + if contents is not None and dependencies.get("type") != "ksds": + raise ValueError('key_length is only valid when "type=ksds".') if not re.fullmatch(r"[0-9]+", str(contents)): raise ValueError( 'Invalid argument "{0}" for type "key_length".'.format(str(contents)) @@ -2105,8 +2097,8 @@ def key_offset(contents, dependencies): """ if contents is None: return contents - if contents is not None and dependencies.get("type") != "KSDS": - raise ValueError('key_offset is only valid when "type=KSDS".') + if contents is not None and dependencies.get("type") != "ksds": + raise ValueError('key_offset is only valid when "type=ksds".') if not re.fullmatch(r"[0-9]+", str(contents)): raise ValueError( @@ -2127,9 +2119,9 @@ def key_length_default(contents, dependencies): """ KEY_LENGTH = 5 length = None - if contents is None and dependencies.get("type") == "KSDS": + if contents is None and dependencies.get("type") == "ksds": length = KEY_LENGTH - elif dependencies.get("type") == "KSDS": + elif dependencies.get("type") == "ksds": length = contents return length @@ -2145,9 +2137,9 @@ def key_offset_default(contents, dependencies): """ KEY_OFFSET = 0 offset = None - if contents is None and dependencies.get("type") == "KSDS": + if contents is None and dependencies.get("type") == 
"ksds": offset = KEY_OFFSET - elif dependencies.get("type") == "KSDS": + elif dependencies.get("type") == "ksds": offset = contents return offset diff --git a/plugins/modules/zos_unarchive.py b/plugins/modules/zos_unarchive.py index aa315b3fb..31d709a3a 100644 --- a/plugins/modules/zos_unarchive.py +++ b/plugins/modules/zos_unarchive.py @@ -181,11 +181,11 @@ - Organization of the destination type: str required: false - default: SEQ + default: seq choices: - - SEQ - - PDS - - PDSE + - seq + - pds + - pdse space_primary: description: - If the destination I(dest) data set does not exist , this sets the @@ -204,28 +204,28 @@ description: - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are C(K), C(M), C(G), C(CYL), and C(TRK). + - Valid units of size are C(k), C(m), C(g), C(cyl), and C(trk). type: str choices: - - K - - M - - G - - CYL - - TRK + - k + - m + - g + - cyl + - trk required: false record_format: description: - If the destination data set does not exist, this sets the format of the - data set. (e.g C(FB)) - - Choices are case-insensitive. + data set. (e.g C(fb)) + - Choices are case-sensitive. required: false choices: - - FB - - VB - - FBA - - VBA - - U + - fb + - vb + - fba + - vba + - u type: str record_length: description: @@ -249,15 +249,15 @@ key_offset: description: - The key offset to use when creating a KSDS data set. - - I(key_offset) is required when I(type=KSDS). - - I(key_offset) should only be provided when I(type=KSDS) + - I(key_offset) is required when I(type=ksds). + - I(key_offset) should only be provided when I(type=ksds) type: int required: false key_length: description: - The key length to use when creating a KSDS data set. - - I(key_length) is required when I(type=KSDS). - - I(key_length) should only be provided when I(type=KSDS) + - I(key_length) is required when I(type=ksds). 
+ - I(key_length) should only be provided when I(type=ksds) type: int required: false sms_storage_class: @@ -695,11 +695,11 @@ def _create_dest_data_set( temp_ds = datasets.tmp_name(high_level_qualifier=hlq) arguments.update(name=temp_ds) if record_format is None: - arguments.update(record_format="FB") + arguments.update(record_format="fb") if record_length is None: arguments.update(record_length=80) if type is None: - arguments.update(type="SEQ") + arguments.update(type="seq") if space_primary is None: arguments.update(space_primary=self._compute_dest_data_set_size()) arguments.pop("self") @@ -802,8 +802,8 @@ def extract_src(self): temp_ds, rc = self._create_dest_data_set(**self.dest_data_set) rc = self.unpack(self.src, temp_ds) else: - temp_ds, rc = self._create_dest_data_set(type="SEQ", - record_format="U", + temp_ds, rc = self._create_dest_data_set(type="seq", + record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True) @@ -823,7 +823,7 @@ def _list_content(self, source): self._get_restored_datasets(out) def list_archive_content(self): - temp_ds, rc = self._create_dest_data_set(type="SEQ", record_format="U", record_length=0, tmp_hlq=self.tmphlq, replace=True) + temp_ds, rc = self._create_dest_data_set(type="seq", record_format="u", record_length=0, tmp_hlq=self.tmphlq, replace=True) self.unpack(self.src, temp_ds) self._list_content(temp_ds) datasets.delete(temp_ds) @@ -1026,9 +1026,9 @@ def run_module(): ), type=dict( type='str', - choices=['SEQ', 'PDS', 'PDSE'], + choices=['seq', 'pds', 'pdse'], required=False, - default='SEQ', + default='seq', ), space_primary=dict( type='int', required=False), @@ -1036,12 +1036,12 @@ def run_module(): type='int', required=False), space_type=dict( type='str', - choices=['K', 'M', 'G', 'CYL', 'TRK'], + choices=['k', 'm', 'g', 'cyl', 'trk'], required=False, ), record_format=dict( type='str', - choices=["FB", "VB", "FBA", "VBA", "U"], + choices=["fb", "vb", "fba", "vba", "u"], required=False ), 
record_length=dict(type='int', required=False), @@ -1107,7 +1107,7 @@ def run_module(): required=False, options=dict( name=dict(arg_type='str', required=False), - type=dict(arg_type='str', required=False, default="SEQ"), + type=dict(arg_type='str', required=False, default="seq"), space_primary=dict(arg_type='int', required=False), space_secondary=dict( arg_type='int', required=False), diff --git a/tests/functional/modules/test_zos_archive_func.py b/tests/functional/modules/test_zos_archive_func.py index a9bfd658c..f6b1140fa 100644 --- a/tests/functional/modules/test_zos_archive_func.py +++ b/tests/functional/modules/test_zos_archive_func.py @@ -336,16 +336,16 @@ def test_uss_archive_remove_targets(ansible_zos_module, format): ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2", "MEM3"]), - dict(dstype="PDSE", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="pdse", members=["MEM1", "MEM2", "MEM3"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB"], + "record_format", ["fb", "vb"], ) def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -366,7 +366,7 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -375,7 +375,7 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: 
test_line = "a" * record_length @@ -388,7 +388,7 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") archive_result = hosts.all.zos_archive( src=src_data_set, dest=archive_data_set, @@ -415,16 +415,16 @@ def test_mvs_archive_single_dataset(ansible_zos_module, format, data_set, record ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2", "MEM3"]), - dict(dstype="PDSE", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="pdse", members=["MEM1", "MEM2", "MEM3"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB"], + "record_format", ["fb", "vb"], ) def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -445,7 +445,7 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -454,7 +454,7 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -468,7 +468,7 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data format_dict = dict(name=format) format_dict["format_options"] = dict(use_adrdssu=True) if format == 
"terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") archive_result = hosts.all.zos_archive( src=src_data_set, dest=archive_data_set, @@ -495,9 +495,9 @@ def test_mvs_archive_single_dataset_use_adrdssu(ansible_zos_module, format, data ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2", "MEM3"]), - dict(dstype="PDSE", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2", "MEM3"]), + dict(dstype="pdse", members=["MEM1", "MEM2", "MEM3"]), ] ) def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, data_set): @@ -514,11 +514,11 @@ def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, d name=src_data_set, type=data_set.get("dstype"), state="present", - record_format="FB", + record_format="fb", replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -536,7 +536,7 @@ def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, d format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") archive_result = hosts.all.zos_archive( src=src_data_set, dest=archive_data_set, @@ -566,9 +566,9 @@ def test_mvs_archive_single_data_set_remove_target(ansible_zos_module, format, d ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, data_set): @@ -582,7 +582,7 @@ def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, 
data_set): n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -600,7 +600,7 @@ def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, data_set): format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( src="{0}*".format(src_data_set), @@ -629,9 +629,9 @@ def test_mvs_archive_multiple_data_sets(ansible_zos_module, format, data_set): ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, format, data_set): @@ -645,7 +645,7 @@ def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, forma n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -663,7 +663,7 @@ def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, forma format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) exclude = "{0}1".format(src_data_set) archive_result = hosts.all.zos_archive( @@ -697,9 +697,9 @@ def test_mvs_archive_multiple_data_sets_with_exclusion(ansible_zos_module, forma ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - 
dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, data_set): @@ -713,7 +713,7 @@ def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, d n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -731,7 +731,7 @@ def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, d format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( src="{0}*".format(src_data_set), @@ -762,9 +762,9 @@ def test_mvs_archive_multiple_data_sets_and_remove(ansible_zos_module, format, d ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, data_set): @@ -778,7 +778,7 @@ def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, n=3, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -801,7 +801,7 @@ def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") 
format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( src=path_list, @@ -836,9 +836,9 @@ def test_mvs_archive_multiple_data_sets_with_missing(ansible_zos_module, format, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) def test_mvs_archive_single_dataset_force_lock(ansible_zos_module, format, data_set): @@ -858,7 +858,7 @@ def test_mvs_archive_single_dataset_force_lock(ansible_zos_module, format, data_ replace=True, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{src_data_set}({member})", @@ -876,7 +876,7 @@ def test_mvs_archive_single_dataset_force_lock(ansible_zos_module, format, data_ format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") # copy/compile c program and copy jcl to hold data set lock for n seconds in background(&) hosts.all.shell(cmd="echo \"{0}\" > {1}".format(c_pgm, '/tmp/disp_shr/pdse-lock.c')) diff --git a/tests/functional/modules/test_zos_backup_restore.py b/tests/functional/modules/test_zos_backup_restore.py index a35750b63..ca7ef740a 100644 --- a/tests/functional/modules/test_zos_backup_restore.py +++ b/tests/functional/modules/test_zos_backup_restore.py @@ -307,16 +307,16 @@ def test_backup_and_restore_of_data_set( @pytest.mark.parametrize( "backup_name,space,space_type", [ - (DATA_SET_BACKUP_LOCATION, 10, "M"), - (DATA_SET_BACKUP_LOCATION, 10000, "K"), + (DATA_SET_BACKUP_LOCATION, 10, "m"), + (DATA_SET_BACKUP_LOCATION, 10000, "k"), (DATA_SET_BACKUP_LOCATION, 10, None), - 
(DATA_SET_BACKUP_LOCATION, 2, "CYL"), - (DATA_SET_BACKUP_LOCATION, 10, "TRK"), - (UNIX_BACKUP_LOCATION, 10, "M"), - (UNIX_BACKUP_LOCATION, 10000, "K"), + (DATA_SET_BACKUP_LOCATION, 2, "cyl"), + (DATA_SET_BACKUP_LOCATION, 10, "trk"), + (UNIX_BACKUP_LOCATION, 10, "m"), + (UNIX_BACKUP_LOCATION, 10000, "k"), (UNIX_BACKUP_LOCATION, 10, None), - (UNIX_BACKUP_LOCATION, 2, "CYL"), - (UNIX_BACKUP_LOCATION, 10, "TRK"), + (UNIX_BACKUP_LOCATION, 2, "cyl"), + (UNIX_BACKUP_LOCATION, 10, "trk"), ], ) def test_backup_and_restore_of_data_set_various_space_measurements( @@ -693,7 +693,7 @@ def test_restore_of_data_set_when_volume_does_not_exist(ansible_zos_module): # backup_name=DATA_SET_BACKUP_LOCATION, # overwrite=True, # space=500, -# space_type="M", +# space_type="m", # ) # assert_module_did_not_fail(results) # assert_data_set_or_file_exists(hosts, DATA_SET_BACKUP_LOCATION) @@ -706,7 +706,7 @@ def test_restore_of_data_set_when_volume_does_not_exist(ansible_zos_module): # full_volume=True, # sms_storage_class="DB2SMS10", # space=500, -# space_type="M", +# space_type="m", # ) # assert_module_did_not_fail(results) # assert_data_set_exists_on_volume(hosts, data_set_name, VOLUME) diff --git a/tests/functional/modules/test_zos_blockinfile_func.py b/tests/functional/modules/test_zos_blockinfile_func.py index 197bc9fa3..508a2ce8d 100644 --- a/tests/functional/modules/test_zos_blockinfile_func.py +++ b/tests/functional/modules/test_zos_blockinfile_func.py @@ -428,10 +428,10 @@ ENCODING = ['IBM-1047', 'ISO8859-1', 'UTF-8'] # supported data set types -DS_TYPE = ['SEQ', 'PDS', 'PDSE'] +DS_TYPE = ['seq', 'pds', 'pdse'] # not supported data set types -NS_DS_TYPE = ['ESDS', 'RRDS', 'LDS'] +NS_DS_TYPE = ['esds', 'rrds', 'lds'] USS_BACKUP_FILE = "/tmp/backup.tmp" BACKUP_OPTIONS = [None, "BLOCKIF.TEST.BACKUP", "BLOCKIF.TEST.BACKUP(BACKUP)"] @@ -450,7 +450,7 @@ def set_ds_environment(ansible_zos_module, TEMP_FILE, DS_NAME, DS_TYPE, CONTENT) hosts = ansible_zos_module hosts.all.shell(cmd="echo 
\"{0}\" > {1}".format(CONTENT, TEMP_FILE)) hosts.all.zos_data_set(name=DS_NAME, type=DS_TYPE) - if DS_TYPE in ["PDS", "PDSE"]: + if DS_TYPE in ["pds", "pdse"]: DS_FULL_NAME = DS_NAME + "(MEM)" hosts.all.zos_data_set(name=DS_FULL_NAME, state="present", type="member") cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), DS_FULL_NAME) @@ -1138,7 +1138,7 @@ def test_ds_block_absent(ansible_zos_module, dstype): def test_ds_tmp_hlq_option(ansible_zos_module): # This TMPHLQ only works with sequential datasets hosts = ansible_zos_module - ds_type = "SEQ" + ds_type = "seq" params=dict(insertafter="EOF", block="export ZOAU_ROOT\n", state="present", backup=True, tmp_hlq="TMPHLQ") kwargs = dict(backup_name=r"TMPHLQ\..") content = TEST_CONTENT @@ -1228,7 +1228,7 @@ def test_ds_block_insertafter_regex_force(ansible_zos_module, dstype): MEMBER_1, MEMBER_2 = "MEM1", "MEM2" TEMP_FILE = "/tmp/{0}".format(MEMBER_2) content = TEST_CONTENT - if ds_type == "SEQ": + if ds_type == "seq": params["path"] = default_data_set_name+".{0}".format(MEMBER_2) else: params["path"] = default_data_set_name+"({0})".format(MEMBER_2) @@ -1245,7 +1245,7 @@ def test_ds_block_insertafter_regex_force(ansible_zos_module, dstype): ] ) # write memeber to verify cases - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), params["path"]) else: cmdStr = "cp {0} \"//'{1}'\" ".format(quote(TEMP_FILE), params["path"]) @@ -1321,7 +1321,7 @@ def test_ds_encoding(ansible_zos_module, encoding, dstype): hosts.all.shell(cmd="echo \"{0}\" > {1}".format(content, temp_file)) hosts.all.zos_encode(src=temp_file, dest=temp_file, from_encoding="IBM-1047", to_encoding=params["encoding"]) hosts.all.zos_data_set(name=ds_name, type=ds_type) - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: ds_full_name = ds_name + "(MEM)" hosts.all.zos_data_set(name=ds_full_name, state="present", type="member") cmdStr = "cp -CM {0} 
\"//'{1}'\"".format(quote(temp_file), ds_full_name) @@ -1360,7 +1360,7 @@ def test_not_exist_ds_block_insertafter_regex(ansible_zos_module): @pytest.mark.ds def test_ds_block_insertafter_nomatch_eof_insert(ansible_zos_module): hosts = ansible_zos_module - ds_type = 'SEQ' + ds_type = 'seq' params=dict(insertafter="EOF", block="export ZOAU_ROOT\nexport ZOAU_HOME\nexport ZOAU_DIR", state="present") params["insertafter"] = 'SOME_NON_EXISTING_PATTERN' ds_name = get_tmp_ds_name() @@ -1413,7 +1413,7 @@ def test_ds_not_supported(ansible_zos_module, dstype): @pytest.mark.ds -@pytest.mark.parametrize("dstype", ["PDS","PDSE"]) +@pytest.mark.parametrize("dstype", ["pds","pdse"]) def test_ds_block_insertafter_regex_fail(ansible_zos_module, dstype): hosts = ansible_zos_module ds_type = dstype diff --git a/tests/functional/modules/test_zos_copy_func.py b/tests/functional/modules/test_zos_copy_func.py index 6e6a9a073..13e6d367b 100644 --- a/tests/functional/modules/test_zos_copy_func.py +++ b/tests/functional/modules/test_zos_copy_func.py @@ -260,7 +260,7 @@ def populate_partitioned_data_set(hosts, name, ds_type, members=None): Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the data set. - ds_type (str) -- Type of the data set (either PDS or PDSE). + ds_type (str) -- Type of the data set (either pds or pdse). members (list, optional) -- List of member names to create. """ if not members: @@ -282,9 +282,9 @@ def get_listcat_information(hosts, name, ds_type): Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the data set. - ds_type (str) -- Type of data set ("SEQ", "PDS", "PDSE", "KSDS"). + ds_type (str) -- Type of data set ("seq", "pds", "pdse", "ksds"). 
""" - if ds_type.upper() == "KSDS": + if ds_type == "ksds": idcams_input = " LISTCAT ENT('{0}') DATA ALL".format(name) else: idcams_input = " LISTCAT ENTRIES('{0}')".format(name) @@ -311,7 +311,7 @@ def create_vsam_data_set(hosts, name, ds_type, add_data=False, key_length=None, Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the VSAM data set. - type (str) -- Type of the VSAM (KSDS, ESDS, RRDS, LDS) + type (str) -- Type of the VSAM (ksds, esds, rrds, lds) add_data (bool, optional) -- Whether to add records to the VSAM. key_length (int, optional) -- Key length (only for KSDS data sets). key_offset (int, optional) -- Key offset (only for KSDS data sets). @@ -321,7 +321,7 @@ def create_vsam_data_set(hosts, name, ds_type, add_data=False, key_length=None, type=ds_type, state="present" ) - if ds_type == "KSDS": + if ds_type == "ksds": params["key_length"] = key_length params["key_offset"] = key_offset @@ -370,7 +370,7 @@ def link_loadlib_from_cobol(hosts, cobol_src_pds, cobol_src_mem, loadlib_pds, lo # Submit link JCL. 
job_result = hosts.all.zos_job_submit( src="/tmp/link.jcl", - location="USS", + location="uss", wait_time_s=60 ) for result in job_result.contacted.values(): @@ -1690,7 +1690,7 @@ def test_copy_seq_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="SEQ", + type="seq", replace=True ) @@ -1739,7 +1739,7 @@ def test_copy_seq_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="SEQ", + type="seq", replace=True ) @@ -1790,7 +1790,7 @@ def test_copy_partitioned_data_set_to_seq_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -1840,7 +1840,7 @@ def test_copy_partitioned_data_set_to_partitioned_asa(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -1890,8 +1890,8 @@ def test_copy_asa_data_set_to_text_file(ansible_zos_module): hosts.all.zos_data_set( name=src, state="present", - type="SEQ", - record_format="FBA", + type="seq", + record_format="fba", record_length=80, block_size=27920, replace=True @@ -1966,13 +1966,13 @@ def test_ensure_copy_file_does_not_change_permission_on_dest(ansible_zos_module, @pytest.mark.seq -@pytest.mark.parametrize("ds_type", [ "PDS", "PDSE", "SEQ"]) +@pytest.mark.parametrize("ds_type", [ "pds", "pdse", "seq"]) def test_copy_dest_lock(ansible_zos_module, ds_type): hosts = ansible_zos_module data_set_1 = get_tmp_ds_name() data_set_2 = get_tmp_ds_name() member_1 = "MEM1" - if ds_type == "PDS" or ds_type == "PDSE": + if ds_type == "pds" or ds_type == "pdse": src_data_set = data_set_1 + "({0})".format(member_1) dest_data_set = data_set_2 + "({0})".format(member_1) else: @@ -1982,9 +1982,9 @@ def test_copy_dest_lock(ansible_zos_module, ds_type): hosts = ansible_zos_module hosts.all.zos_data_set(name=data_set_1, state="present", type=ds_type, replace=True) hosts.all.zos_data_set(name=data_set_2, 
state="present", type=ds_type, replace=True) - if ds_type == "PDS" or ds_type == "PDSE": - hosts.all.zos_data_set(name=src_data_set, state="present", type="MEMBER", replace=True) - hosts.all.zos_data_set(name=dest_data_set, state="present", type="MEMBER", replace=True) + if ds_type == "pds" or ds_type == "pdse": + hosts.all.zos_data_set(name=src_data_set, state="present", type="member", replace=True) + hosts.all.zos_data_set(name=dest_data_set, state="present", type="member", replace=True) # copy text_in source hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(DUMMY_DATA, src_data_set)) # copy/compile c program and copy jcl to hold data set lock for n seconds in background(&) @@ -2272,7 +2272,7 @@ def test_copy_file_to_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2300,7 +2300,7 @@ def test_copy_file_to_non_empty_sequential_data_set(ansible_zos_module, src): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, remote_src=src["is_remote"], force=src["force"]) @@ -2438,7 +2438,7 @@ def test_copy_ps_to_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) verify_copy = hosts.all.shell( @@ -2464,7 +2464,7 @@ def test_copy_ps_to_non_empty_ps(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, 
type="SEQ", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content="Inline content", dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2495,7 +2495,7 @@ def test_copy_ps_to_non_empty_ps_with_special_chars(ansible_zos_module, force): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="absent") + hosts.all.zos_data_set(name=dest, type="seq", state="absent") hosts.all.zos_copy(content=DUMMY_DATA_SPECIAL_CHARS, dest=dest) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest, remote_src=True, force=force) @@ -2526,7 +2526,7 @@ def test_backup_sequential_data_set(ansible_zos_module, backup): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") if backup: copy_res = hosts.all.zos_copy(src=src, dest=dest, force=True, backup=True, backup_name=backup) @@ -2571,10 +2571,10 @@ def test_copy_file_to_non_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="PDSE", + type="pdse", space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=80, replace=True ) @@ -2617,14 +2617,14 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): try: hosts.all.zos_data_set( name=data_set, - type="PDSE", + type="pdse", space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=80, replace=True ) - hosts.all.zos_data_set(name=dest, type="MEMBER", state="present") + hosts.all.zos_data_set(name=dest, type="member", state="present") if src["is_file"]: copy_result = hosts.all.zos_copy(src=src["src"], dest=dest, force=src["force"], remote_src=src["is_remote"]) @@ -2653,31 +2653,31 @@ def test_copy_file_to_existing_member(ansible_zos_module, src): @pytest.mark.seq @pytest.mark.pdse 
@pytest.mark.parametrize("args", [ - dict(type="SEQ", is_binary=False), - dict(type="SEQ", is_binary=True), - dict(type="PDS", is_binary=False), - dict(type="PDS", is_binary=True), - dict(type="PDSE", is_binary=False), - dict(type="PDSE", is_binary=True) + dict(type="seq", is_binary=False), + dict(type="seq", is_binary=True), + dict(type="pds", is_binary=False), + dict(type="pds", is_binary=True), + dict(type="pdse", is_binary=False), + dict(type="pdse", is_binary=True) ]) def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "SEQ" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "SEQ": - hosts.all.zos_data_set(name=src, type="MEMBER") + if args["type"] != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) + hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) copy_result = hosts.all.zos_copy(src=src, dest=dest, is_binary=args["is_binary"], remote_src=True) verify_copy = hosts.all.shell( @@ -2700,32 +2700,32 @@ def test_copy_data_set_to_non_existing_member(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="SEQ", force=False), - dict(type="SEQ", force=True), - dict(type="PDS", force=False), - dict(type="PDS", force=True), - dict(type="PDSE", force=False), - dict(type="PDSE", force=True) + dict(type="seq", force=False), + dict(type="seq", force=True), + dict(type="pds", force=False), + dict(type="pds", force=True), + dict(type="pdse", force=False), + dict(type="pdse", 
force=True) ]) def test_copy_data_set_to_existing_member(ansible_zos_module, args): hosts = ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if args["type"] == "SEQ" else "{0}(TEST)".format(src_data_set) + src = src_data_set if args["type"] == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=args["type"]) - if args["type"] != "SEQ": - hosts.all.zos_data_set(name=src, type="MEMBER") + if args["type"] != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), executable=SHELL_EXECUTABLE ) - hosts.all.zos_data_set(name=dest_data_set, type="PDSE", replace=True) - hosts.all.zos_data_set(name=dest, type="MEMBER") + hosts.all.zos_data_set(name=dest_data_set, type="pdse", replace=True) + hosts.all.zos_data_set(name=dest, type="member") copy_result = hosts.all.zos_copy(src=src, dest=dest, force=args["force"], remote_src=True) verify_copy = hosts.all.shell( @@ -2844,7 +2844,7 @@ def test_copy_dir_crlf_endings_to_non_existing_pdse(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): hosts = ansible_zos_module src_dir = "/tmp/testdir" @@ -2859,8 +2859,8 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): name=dest, type=src_type, space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=80, ) @@ -2883,18 +2883,18 @@ def test_copy_dir_to_existing_pdse(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["SEQ", "PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): hosts = 
ansible_zos_module src_data_set = get_tmp_ds_name() - src = src_data_set if src_type == "SEQ" else "{0}(TEST)".format(src_data_set) + src = src_data_set if src_type == "seq" else "{0}(TEST)".format(src_data_set) dest_data_set = get_tmp_ds_name() dest = "{0}(MEMBER)".format(dest_data_set) try: hosts.all.zos_data_set(name=src_data_set, type=src_type) - if src_type != "SEQ": - hosts.all.zos_data_set(name=src, type="MEMBER") + if src_type != "seq": + hosts.all.zos_data_set(name=src, type="member") hosts.all.shell( "decho 'Records for test' '{0}'".format(src), @@ -2924,10 +2924,10 @@ def test_copy_data_set_to_non_existing_pdse(ansible_zos_module, src_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(src_type="PDS", dest_type="PDS"), - dict(src_type="PDS", dest_type="PDSE"), - dict(src_type="PDSE", dest_type="PDS"), - dict(src_type="PDSE", dest_type="PDSE"), + dict(src_type="pds", dest_type="pds"), + dict(src_type="pds", dest_type="pdse"), + dict(src_type="pdse", dest_type="pds"), + dict(src_type="pdse", dest_type="pdse"), ]) def test_copy_pds_to_existing_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -2979,9 +2979,9 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -2990,12 +2990,12 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3012,24 +3012,24 @@ def test_copy_pds_loadlib_member_to_pds_loadlib_member(ansible_zos_module, is_cr hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + 
record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) # pre-allocate dest loadlib to copy over with an alias. hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3117,20 +3117,20 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3138,23 +3138,23 @@ def test_copy_pds_loadlib_member_to_uss_to_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3267,9 +3267,9 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3278,12 +3278,12 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + 
type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3306,24 +3306,24 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) # allocate dest loadlib to copy over with an alias. hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3337,12 +3337,12 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): executable=True, aliases=False, dest_data_set={ - 'type': "LIBRARY", - 'record_format': "U", + 'type': "library", + 'record_format': "u", 'record_length': 0, 'block_size': 32760, 'space_primary': 2, - 'space_type': "M", + 'space_type': "m", } ) # copy src loadlib to dest library pds w aliases @@ -3353,12 +3353,12 @@ def test_copy_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): executable=True, aliases=True, dest_data_set={ - 'type': "LIBRARY", - 'record_format': "U", + 'type': "library", + 'record_format': "u", 'record_length': 0, 'block_size': 32760, 'space_primary': 2, - 'space_type': "M", + 'space_type': "m", } ) @@ -3459,9 +3459,9 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3470,12 +3470,12 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + 
type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3530,12 +3530,12 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3548,12 +3548,12 @@ def test_copy_local_pds_loadlib_to_pds_loadlib(ansible_zos_module, is_created): executable=True, aliases=False, dest_data_set={ - 'type': "PDSE", - 'record_format': "U", + 'type': "pdse", + 'record_format': "u", 'record_length': 0, 'block_size': 32760, 'space_primary': 2, - 'space_type': "M", + 'space_type': "m", } ) else: @@ -3621,9 +3621,9 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=cobol_src_pds, state="present", - type="PDS", + type="pds", space_primary=2, - record_format="FB", + record_format="fb", record_length=80, block_size=3120, replace=True, @@ -3632,12 +3632,12 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=src_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3657,24 +3657,24 @@ def test_copy_pds_loadlib_to_uss_to_pds_loadlib(ansible_zos_module): hosts.all.zos_data_set( name=dest_lib, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) # allocate dest loadlib to copy over with an alias. 
hosts.all.zos_data_set( name=dest_lib_aliases, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) @@ -3833,12 +3833,12 @@ def test_copy_executables_uss_to_member(ansible_zos_module, is_created): hosts.all.zos_data_set( name=dest, state="present", - type="PDSE", - record_format="U", + type="pdse", + record_format="u", record_length=0, block_size=32760, space_primary=2, - space_type="M", + space_type="m", replace=True ) copy_uss_to_mvs_res = hosts.all.zos_copy( @@ -3884,7 +3884,7 @@ def test_copy_pds_member_with_system_symbol(ansible_zos_module): hosts.all.zos_data_set( name=dest, state="present", - type="PDSE", + type="pdse", replace=True ) @@ -3920,8 +3920,8 @@ def test_copy_multiple_data_set_members(ansible_zos_module): ds_list = ["{0}({1})".format(src, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="PDS") - hosts.all.zos_data_set(name=dest, type="PDS") + hosts.all.zos_data_set(name=src, type="pds") + hosts.all.zos_data_set(name=dest, type="pds") for member in ds_list: hosts.all.shell( @@ -3966,8 +3966,8 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): dest_ds_list = ["{0}({1})".format(dest, member) for member in member_list] try: - hosts.all.zos_data_set(name=src, type="PDS") - hosts.all.zos_data_set(name=dest, type="PDS") + hosts.all.zos_data_set(name=src, type="pds") + hosts.all.zos_data_set(name=dest, type="pds") for src_member in src_ds_list: hosts.all.shell( @@ -4000,7 +4000,7 @@ def test_copy_multiple_data_set_members_in_loop(ansible_zos_module): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("ds_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("ds_type", ["pds", "pdse"]) def test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): hosts = ansible_zos_module data_set = get_tmp_ds_name() @@ -4038,10 +4038,10 @@ def 
test_copy_member_to_non_existing_uss_file(ansible_zos_module, ds_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(ds_type="PDS", force=False), - dict(ds_type="PDS", force=True), - dict(ds_type="PDSE", force=False), - dict(ds_type="PDSE", force=True) + dict(ds_type="pds", force=False), + dict(ds_type="pds", force=True), + dict(ds_type="pdse", force=False), + dict(ds_type="pdse", force=True) ]) def test_copy_member_to_existing_uss_file(ansible_zos_module, args): hosts = ansible_zos_module @@ -4085,7 +4085,7 @@ def test_copy_member_to_existing_uss_file(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4130,7 +4130,7 @@ def test_copy_pdse_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.uss @pytest.mark.pdse @pytest.mark.aliases -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_member_to_uss_dir(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4176,7 +4176,7 @@ def test_copy_member_to_uss_dir(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["pds", "pdse"]) def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): hosts = ansible_zos_module src_ds = get_tmp_ds_name() @@ -4212,10 +4212,10 @@ def test_copy_member_to_non_existing_seq_data_set(ansible_zos_module, src_type): @pytest.mark.seq @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDS", force=False), - dict(type="PDS", force=True), - dict(type="PDSE", force=False), - dict(type="PDSE", force=True), + dict(type="pds", force=False), + dict(type="pds", 
force=True), + dict(type="pdse", force=False), + dict(type="pdse", force=True), ]) def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): hosts = ansible_zos_module @@ -4224,7 +4224,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): dest = get_tmp_ds_name() try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present", replace=True) + hosts.all.zos_data_set(name=dest, type="seq", state="present", replace=True) hosts.all.zos_data_set(name=src_ds, type=args["type"], state="present") for data_set in [src, dest]: @@ -4257,7 +4257,7 @@ def test_copy_member_to_existing_seq_data_set(ansible_zos_module, args): @pytest.mark.uss @pytest.mark.pdse -@pytest.mark.parametrize("dest_type", ["PDS", "PDSE"]) +@pytest.mark.parametrize("dest_type", ["pds", "pdse"]) def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): hosts = ansible_zos_module src = "/etc/profile" @@ -4267,8 +4267,8 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): hosts.all.zos_data_set( type=dest_type, space_primary=5, - space_type="M", - record_format="FBA", + space_type="m", + record_format="fba", record_length=25, ) @@ -4300,10 +4300,10 @@ def test_copy_file_to_member_convert_encoding(ansible_zos_module, dest_type): @pytest.mark.pdse @pytest.mark.parametrize("args", [ - dict(type="PDS", backup=None), - dict(type="PDS", backup="USER.TEST.PDS.BACKUP"), - dict(type="PDSE", backup=None), - dict(type="PDSE", backup="USER.TEST.PDSE.BACKUP"), + dict(type="pds", backup=None), + dict(type="pds", backup="USER.TEST.PDS.BACKUP"), + dict(type="pdse", backup=None), + dict(type="pdse", backup="USER.TEST.PDSE.BACKUP"), ]) def test_backup_pds(ansible_zos_module, args): hosts = ansible_zos_module @@ -4349,7 +4349,7 @@ def test_backup_pds(ansible_zos_module, args): @pytest.mark.seq @pytest.mark.pdse -@pytest.mark.parametrize("src_type", ["SEQ", "PDS", "PDSE"]) +@pytest.mark.parametrize("src_type", ["seq", "pds", "pdse"]) def 
test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_type): hosts = ansible_zos_module source = get_tmp_ds_name() @@ -4365,8 +4365,8 @@ def test_copy_data_set_to_volume(ansible_zos_module, volumes_on_systems, src_typ try: hosts.all.zos_data_set(name=source, type=src_type, state='present') - if src_type != "SEQ": - hosts.all.zos_data_set(name=source_member, type="MEMBER", state='present') + if src_type != "seq": + hosts.all.zos_data_set(name=source_member, type="member", state='present') copy_res = hosts.all.zos_copy( src=source, @@ -4425,8 +4425,8 @@ def test_copy_ksds_to_existing_ksds(ansible_zos_module, force): dest_ds = get_tmp_ds_name() try: - create_vsam_data_set(hosts, src_ds, "KSDS", add_data=True, key_length=12, key_offset=0) - create_vsam_data_set(hosts, dest_ds, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, src_ds, "ksds", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, dest_ds, "ksds", add_data=True, key_length=12, key_offset=0) copy_res = hosts.all.zos_copy(src=src_ds, dest=dest_ds, remote_src=True, force=force) verify_copy = get_listcat_information(hosts, dest_ds, "ksds") @@ -4461,8 +4461,8 @@ def test_backup_ksds(ansible_zos_module, backup): backup_name = None try: - create_vsam_data_set(hosts, src, "KSDS", add_data=True, key_length=12, key_offset=0) - create_vsam_data_set(hosts, dest, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, src, "ksds", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, dest, "ksds", add_data=True, key_length=12, key_offset=0) if backup: copy_res = hosts.all.zos_copy(src=src, dest=dest, backup=True, backup_name=backup, remote_src=True, force=True) @@ -4544,8 +4544,8 @@ def test_dest_data_set_parameters(ansible_zos_module, volumes_on_systems): volume = volumes.get_available_vol() space_primary = 3 space_secondary = 2 - space_type = "K" - record_format = "VB" + space_type = "k" + 
record_format = "vb" record_length = 100 block_size = 21000 @@ -4556,7 +4556,7 @@ def test_dest_data_set_parameters(ansible_zos_module, volumes_on_systems): remote_src=True, volume=volume, dest_data_set=dict( - type="SEQ", + type="seq", space_primary=space_primary, space_secondary=space_secondary, space_type=space_type, @@ -4587,7 +4587,7 @@ def test_dest_data_set_parameters(ansible_zos_module, volumes_on_systems): assert len(output_lines) == 5 data_set_attributes = output_lines[2].strip().split() assert len(data_set_attributes) == 4 - assert data_set_attributes[0] == record_format + assert data_set_attributes[0] == record_format.upper() assert data_set_attributes[1] == str(record_length) assert data_set_attributes[2] == str(block_size) assert data_set_attributes[3] == "PS" @@ -4637,7 +4637,7 @@ def test_copy_uss_file_to_existing_sequential_data_set_twice_with_tmphlq_option( src_file = "/etc/profile" tmphlq = "TMPHLQ" try: - hosts.all.zos_data_set(name=dest, type="SEQ", state="present") + hosts.all.zos_data_set(name=dest, type="seq", state="present") copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, force=force) copy_result = hosts.all.zos_copy(src=src_file, dest=dest, remote_src=True, backup=True, tmp_hlq=tmphlq, force=force) diff --git a/tests/functional/modules/test_zos_data_set_func.py b/tests/functional/modules/test_zos_data_set_func.py index 7ab4685c0..f96bfabdc 100644 --- a/tests/functional/modules/test_zos_data_set_func.py +++ b/tests/functional/modules/test_zos_data_set_func.py @@ -29,12 +29,12 @@ data_set_types = [ - ("PDS"), - ("SEQ"), - ("PDSE"), - ("ESDS"), - ("RRDS"), - ("LDS"), + ("pds"), + ("seq"), + ("pdse"), + ("esds"), + ("rrds"), + ("lds"), ] TEMP_PATH = "/tmp/jcl" @@ -161,7 +161,7 @@ def test_data_set_catalog_and_uncatalog(ansible_zos_module, jcl, volumes_on_syst hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) results = 
hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS", wait_time_s=30 + src=TEMP_PATH + "/SAMPLE", location="uss", wait_time_s=30 ) # verify data set creation was successful @@ -220,7 +220,7 @@ def test_data_set_present_when_uncataloged(ansible_zos_module, jcl, volumes_on_s hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) results = hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS" + src=TEMP_PATH + "/SAMPLE", location="uss" ) # verify data set creation was successful for result in results.contacted.values(): @@ -266,7 +266,7 @@ def test_data_set_replacement_when_uncataloged(ansible_zos_module, jcl, volumes_ hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume, dataset)), TEMP_PATH)) results = hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS" + src=TEMP_PATH + "/SAMPLE", location="uss" ) # verify data set creation was successful for result in results.contacted.values(): @@ -314,7 +314,7 @@ def test_data_set_absent_when_uncataloged(ansible_zos_module, jcl, volumes_on_sy hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) results = hosts.all.zos_job_submit( - src=TEMP_PATH + "/SAMPLE", location="USS" + src=TEMP_PATH + "/SAMPLE", location="uss" ) # verify data set creation was successful for result in results.contacted.values(): @@ -351,7 +351,7 @@ def test_data_set_absent_when_uncataloged_and_same_name_cataloged_is_present(ans hosts.all.file(path=TEMP_PATH, state="directory") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_1, dataset)), TEMP_PATH)) - results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="USS") + results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="uss") # verify data set creation was successful for result in 
results.contacted.values(): @@ -366,7 +366,7 @@ def test_data_set_absent_when_uncataloged_and_same_name_cataloged_is_present(ans hosts.all.file(path=TEMP_PATH + "/SAMPLE", state="absent") hosts.all.shell(cmd=ECHO_COMMAND.format(quote(jcl.format(volume_2, dataset)), TEMP_PATH)) - results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="USS") + results = hosts.all.zos_job_submit(src=TEMP_PATH + "/SAMPLE", location="uss") # verify data set creation was successful for result in results.contacted.values(): @@ -469,7 +469,7 @@ def test_batch_data_set_creation_and_deletion(ansible_zos_module): results = hosts.all.zos_data_set( batch=[ {"name": dataset, "state": "absent"}, - {"name": dataset, "type": "PDS", "state": "present"}, + {"name": dataset, "type": "pds", "state": "present"}, {"name": dataset, "state": "absent"}, ] ) @@ -486,11 +486,11 @@ def test_batch_data_set_and_member_creation(ansible_zos_module): dataset = get_tmp_ds_name(2, 2) results = hosts.all.zos_data_set( batch=[ - {"name": dataset, "type": "PDS", "directory_blocks": 5}, - {"name": dataset + "(newmem1)", "type": "MEMBER"}, + {"name": dataset, "type": "pds", "directory_blocks": 5}, + {"name": dataset + "(newmem1)", "type": "member"}, { "name": dataset + "(newmem2)", - "type": "MEMBER", + "type": "member", "state": "present", }, {"name": dataset, "state": "absent"}, @@ -534,7 +534,7 @@ def test_data_member_force_delete(ansible_zos_module): DEFAULT_DATA_SET_NAME = get_tmp_ds_name(2, 2) # set up: # create pdse - results = hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="present", type="PDSE", replace=True) + results = hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="present", type="pdse", replace=True) for result in results.contacted.values(): assert result.get("changed") is True @@ -543,25 +543,25 @@ def test_data_member_force_delete(ansible_zos_module): batch=[ { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_1), - "type": "MEMBER", + "type": "member", "state": 
"present", "replace": True, }, { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_2), - "type": "MEMBER", + "type": "member", "state": "present", "replace": True, }, { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_3), - "type": "MEMBER", + "type": "member", "state": "present", "replace": True, }, { "name": DEFAULT_DATA_SET_NAME + "({0})".format(MEMBER_4), - "type": "MEMBER", + "type": "member", "state": "present", "replace": True, }, @@ -590,7 +590,7 @@ def test_data_member_force_delete(ansible_zos_module): results = hosts.all.zos_data_set( name="{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_2), state="absent", - type="MEMBER" + type="member" ) for result in results.contacted.values(): assert result.get("failed") is True @@ -598,7 +598,7 @@ def test_data_member_force_delete(ansible_zos_module): # attempt to delete MEMBER_3 with force option. results = hosts.all.zos_data_set( - name="{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_3), state="absent", type="MEMBER", force=True + name="{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_3), state="absent", type="member", force=True ) for result in results.contacted.values(): assert result.get("changed") is True @@ -610,7 +610,7 @@ def test_data_member_force_delete(ansible_zos_module): { "name": "{0}({1})".format(DEFAULT_DATA_SET_NAME, MEMBER_4), "state": "absent", - "type": "MEMBER", + "type": "member", "force": True } ] @@ -647,9 +647,9 @@ def test_repeated_operations(ansible_zos_module): DEFAULT_DATA_SET_NAME_WITH_MEMBER = DEFAULT_DATA_SET_NAME + "(MEM)" results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="PDS", + type="pds", space_primary=5, - space_type="CYL", + space_type="cyl", record_length=15, replace=True, ) @@ -660,7 +660,7 @@ def test_repeated_operations(ansible_zos_module): results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="PDS", + type="pds", replace=True, ) @@ -669,7 +669,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") 
is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER", replace=True + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member", replace=True ) for result in results.contacted.values(): @@ -677,7 +677,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER" + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member" ) for result in results.contacted.values(): @@ -685,7 +685,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER", state="absent" + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member", state="absent" ) for result in results.contacted.values(): @@ -693,7 +693,7 @@ def test_repeated_operations(ansible_zos_module): assert result.get("module_stderr") is None results = hosts.all.zos_data_set( - name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="MEMBER", state="absent" + name=DEFAULT_DATA_SET_NAME_WITH_MEMBER, type="member", state="absent" ) for result in results.contacted.values(): @@ -713,9 +713,9 @@ def test_multi_volume_creation_uncatalog_and_catalog_nonvsam(ansible_zos_module, hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="absent") results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="SEQ", + type="seq", space_primary=5, - space_type="CYL", + space_type="cyl", record_length=15, volumes=[volume_1, volume_2], ) @@ -750,11 +750,11 @@ def test_multi_volume_creation_uncatalog_and_catalog_vsam(ansible_zos_module, vo hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="absent") results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, - type="KSDS", + type="ksds", key_length=5, key_offset=0, space_primary=5, - space_type="CYL", + space_type="cyl", volumes=[volume_1, volume_2], ) for result in results.contacted.values(): @@ -843,7 
+843,7 @@ def test_data_set_temp_data_set_name_batch(ansible_zos_module): @pytest.mark.parametrize( "filesystem", - ["HFS", "ZFS"], + ["hfs", "zfs"], ) def test_filesystem_create_and_mount(ansible_zos_module, filesystem): fulltest = True @@ -852,7 +852,7 @@ def test_filesystem_create_and_mount(ansible_zos_module, filesystem): try: hosts.all.zos_data_set(name=DEFAULT_DATA_SET_NAME, state="absent") - if filesystem == "HFS": + if filesystem == "hfs": result0 = hosts.all.shell(cmd="zinfo -t sys") for result in result0.contacted.values(): sys_info = result.get("stdout_lines") @@ -909,7 +909,7 @@ def test_data_set_creation_zero_values(ansible_zos_module): results = hosts.all.zos_data_set( name=DEFAULT_DATA_SET_NAME, state="present", - type="KSDS", + type="ksds", replace=True, space_primary=5, space_secondary=0, @@ -941,7 +941,7 @@ def test_data_set_creation_with_tmp_hlq(ansible_zos_module): @pytest.mark.parametrize( "formats", - ["F","FB", "VB", "FBA", "VBA", "U"], + ["f","fb", "vb", "fba", "vba", "u"], ) def test_data_set_f_formats(ansible_zos_module, formats, volumes_on_systems): volumes = Volume_Handler(volumes_on_systems) @@ -955,7 +955,7 @@ def test_data_set_f_formats(ansible_zos_module, formats, volumes_on_systems): state="present", format=formats, space_primary="5", - space_type="M", + space_type="m", volume=volume_1, ) for result in results.contacted.values(): diff --git a/tests/functional/modules/test_zos_encode_func.py b/tests/functional/modules/test_zos_encode_func.py index e017450ff..4b74c8834 100644 --- a/tests/functional/modules/test_zos_encode_func.py +++ b/tests/functional/modules/test_zos_encode_func.py @@ -98,7 +98,7 @@ def create_vsam_data_set(hosts, name, ds_type, add_data=False, key_length=None, type=ds_type, state="present" ) - if ds_type == "KSDS": + if ds_type == "ksds": params["key_length"] = key_length params["key_offset"] = key_offset @@ -545,7 +545,7 @@ def test_uss_encoding_conversion_uss_file_to_mvs_vsam(ansible_zos_module): cmd="echo {0} > 
{1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(MVS_VS)), TEMP_JCL_PATH) ) results = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) for result in results.contacted.values(): @@ -576,7 +576,7 @@ def test_uss_encoding_conversion_mvs_vsam_to_uss_file(ansible_zos_module): hosts = ansible_zos_module mlq_size = 3 MVS_VS = get_tmp_ds_name(mlq_size) - create_vsam_data_set(hosts, MVS_VS, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, MVS_VS, "ksds", add_data=True, key_length=12, key_offset=0) hosts.all.file(path=USS_DEST_FILE, state="touch") results = hosts.all.zos_encode( src=MVS_VS, @@ -611,7 +611,7 @@ def test_uss_encoding_conversion_mvs_vsam_to_mvs_ps(ansible_zos_module): hosts = ansible_zos_module MVS_PS = get_tmp_ds_name() MVS_VS = get_tmp_ds_name() - create_vsam_data_set(hosts, MVS_VS, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, MVS_VS, "ksds", add_data=True, key_length=12, key_offset=0) hosts.all.zos_data_set(name=MVS_PS, state="absent") hosts.all.zos_data_set(name=MVS_PS, state="present", type="seq", record_length=TEST_DATA_RECORD_LENGTH) results = hosts.all.zos_encode( @@ -635,7 +635,7 @@ def test_uss_encoding_conversion_mvs_vsam_to_mvs_pds_member(ansible_zos_module): hosts = ansible_zos_module MVS_VS = get_tmp_ds_name() MVS_PDS = get_tmp_ds_name() - create_vsam_data_set(hosts, MVS_VS, "KSDS", add_data=True, key_length=12, key_offset=0) + create_vsam_data_set(hosts, MVS_VS, "ksds", add_data=True, key_length=12, key_offset=0) MVS_PDS_MEMBER = MVS_PDS + '(MEM)' hosts.all.zos_data_set(name=MVS_PDS, state="present", type="pds", record_length=TEST_DATA_RECORD_LENGTH) hosts.all.zos_data_set( @@ -671,7 +671,7 @@ def test_uss_encoding_conversion_mvs_ps_to_mvs_vsam(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(MVS_VS)), TEMP_JCL_PATH) ) 
results = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) for result in results.contacted.values(): assert result.get("jobs") is not None @@ -803,7 +803,7 @@ def test_vsam_backup(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(MVS_VS)), TEMP_JCL_PATH) ) hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) hosts.all.file(path=TEMP_JCL_PATH, state="absent") # submit JCL to populate KSDS @@ -814,7 +814,7 @@ def test_vsam_backup(ansible_zos_module): ) ) hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(TEMP_JCL_PATH), location="uss", wait_time_s=30 ) hosts.all.zos_encode( diff --git a/tests/functional/modules/test_zos_fetch_func.py b/tests/functional/modules/test_zos_fetch_func.py index b239bbbd9..5b8e7f878 100644 --- a/tests/functional/modules/test_zos_fetch_func.py +++ b/tests/functional/modules/test_zos_fetch_func.py @@ -89,8 +89,8 @@ def extract_member_name(data_set): def create_and_populate_test_ps_vb(ansible_zos_module, name): params=dict( name=name, - type='SEQ', - record_format='VB', + type='seq', + record_format='vb', record_length='3180', block_size='3190' ) @@ -112,7 +112,7 @@ def create_vsam_data_set(hosts, name, ds_type, key_length=None, key_offset=None) Arguments: hosts (object) -- Ansible instance(s) that can call modules. name (str) -- Name of the VSAM data set. - type (str) -- Type of the VSAM (KSDS, ESDS, RRDS, LDS) + type (str) -- Type of the VSAM (ksds, esds, rrds, lds) add_data (bool, optional) -- Whether to add records to the VSAM. key_length (int, optional) -- Key length (only for KSDS data sets). key_offset (int, optional) -- Key offset (only for KSDS data sets). 
@@ -122,7 +122,7 @@ def create_vsam_data_set(hosts, name, ds_type, key_length=None, key_offset=None) type=ds_type, state="present" ) - if ds_type == "KSDS": + if ds_type == "ksds": params["key_length"] = key_length params["key_offset"] = key_offset @@ -188,7 +188,7 @@ def test_fetch_uss_file_present_on_local_machine(ansible_zos_module): def test_fetch_sequential_data_set_fixed_block(ansible_zos_module): hosts = ansible_zos_module TEST_PS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="5m") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="m", space_primary=5) hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PS)) params = dict(src=TEST_PS, dest="/tmp/", flat=True) dest_path = "/tmp/" + TEST_PS @@ -229,7 +229,7 @@ def test_fetch_sequential_data_set_variable_block(ansible_zos_module): def test_fetch_partitioned_data_set(ansible_zos_module): hosts = ansible_zos_module TEST_PDS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PDS, state="present", type="PDSE") + hosts.all.zos_data_set(name=TEST_PDS, state="present", type="pdse") TEST_PDS_MEMBER = TEST_PDS + "(MEM)" hosts.all.zos_data_set(name=TEST_PDS_MEMBER, type="member") hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PDS_MEMBER)) @@ -264,7 +264,7 @@ def test_fetch_vsam_data_set(ansible_zos_module, volumes_on_systems): cmd="echo {0} > {1}/SAMPLE".format(quote(KSDS_CREATE_JCL.format(volume_1, test_vsam)), temp_jcl_path) ) hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(temp_jcl_path), location="USS", wait_time_s=30 + src="{0}/SAMPLE".format(temp_jcl_path), location="uss", wait_time_s=30 ) hosts.all.shell(cmd="echo \"{0}\c\" > {1}".format(TEST_DATA, USS_FILE)) hosts.all.zos_encode( @@ -300,7 +300,7 @@ def test_fetch_vsam_data_set(ansible_zos_module, volumes_on_systems): def test_fetch_vsam_empty_data_set(ansible_zos_module): hosts = ansible_zos_module src_ds = "TEST.VSAM.DATA" - 
create_vsam_data_set(hosts, src_ds, "KSDS", key_length=12, key_offset=0) + create_vsam_data_set(hosts, src_ds, "ksds", key_length=12, key_offset=0) params = dict(src=src_ds, dest="/tmp/", flat=True) dest_path = "/tmp/" + src_ds try: @@ -347,7 +347,7 @@ def test_fetch_partitioned_data_set_member_in_binary_mode(ansible_zos_module): def test_fetch_sequential_data_set_in_binary_mode(ansible_zos_module): hosts = ansible_zos_module TEST_PS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="5m") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="m", space_primary=5) hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PS)) params = dict(src=TEST_PS, dest="/tmp/", flat=True, is_binary=True) dest_path = "/tmp/" + TEST_PS @@ -368,7 +368,7 @@ def test_fetch_sequential_data_set_in_binary_mode(ansible_zos_module): def test_fetch_partitioned_data_set_binary_mode(ansible_zos_module): hosts = ansible_zos_module TEST_PDS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PDS, state="present", type="PDSE") + hosts.all.zos_data_set(name=TEST_PDS, state="present", type="pdse") TEST_PDS_MEMBER = TEST_PDS + "(MEM)" hosts.all.zos_data_set(name=TEST_PDS_MEMBER, type="member") hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PDS_MEMBER)) @@ -417,7 +417,7 @@ def test_fetch_partitioned_data_set_empty_fails(ansible_zos_module): name=pds_name, type="pds", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=25, ) @@ -438,12 +438,12 @@ def test_fetch_partitioned_data_set_member_empty(ansible_zos_module): name=pds_name, type="pds", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=25, ) hosts.all.zos_data_set(name=pds_name, type="pds") - hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="MEMBER", replace="yes") + hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="member", replace="yes") params = 
dict(src=pds_name + "(MYDATA)", dest="/tmp/", flat=True) dest_path = "/tmp/MYDATA" try: @@ -535,7 +535,7 @@ def test_fetch_mvs_data_set_missing_fails(ansible_zos_module): def test_fetch_sequential_data_set_replace_on_local_machine(ansible_zos_module): hosts = ansible_zos_module TEST_PS = get_tmp_ds_name() - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="5m") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="m", space_primary=5) ds_name = TEST_PS hosts.all.zos_data_set(name=TEST_PS, state="present") hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PS)) @@ -566,11 +566,11 @@ def test_fetch_partitioned_data_set_replace_on_local_machine(ansible_zos_module) name=pds_name, type="pds", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=25, ) - hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="MEMBER", replace="yes") + hosts.all.zos_data_set(name=pds_name + "(MYDATA)", type="member", replace="yes") os.mkdir(dest_path) with open(full_path, "w") as infile: infile.write(DUMMY_DATA) diff --git a/tests/functional/modules/test_zos_find_func.py b/tests/functional/modules/test_zos_find_func.py index 3a30d9510..37a67ddbc 100644 --- a/tests/functional/modules/test_zos_find_func.py +++ b/tests/functional/modules/test_zos_find_func.py @@ -124,7 +124,7 @@ def test_find_pds_members_containing_string(ansible_zos_module): batch=[ dict( name=i + "(MEMBER)", - type="MEMBER", + type="member", state='present', replace='yes' ) for i in PDS_NAMES @@ -185,10 +185,10 @@ def test_exclude_members_from_matched_list(ansible_zos_module): batch=[dict(name=i, type='pds', state='present') for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(MEMBER)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + "(MEMBER)", type="member") for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(FILE)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + 
"(FILE)", type="member") for i in PDS_NAMES] ) find_res = hosts.all.zos_find( pds_paths=['TEST.FIND.PDS.FUNCTEST.*'], excludes=['.*FILE$'], patterns=['.*'] @@ -221,8 +221,8 @@ def test_find_data_sets_larger_than_size(ansible_zos_module): TEST_PS1 = 'TEST.PS.ONE' TEST_PS2 = 'TEST.PS.TWO' try: - res = hosts.all.zos_data_set(name=TEST_PS1, state="present", size="5m") - res = hosts.all.zos_data_set(name=TEST_PS2, state="present", size="5m") + res = hosts.all.zos_data_set(name=TEST_PS1, state="present", space_type="m", space_primary=5) + res = hosts.all.zos_data_set(name=TEST_PS2, state="present", space_type="m", space_primary=5) find_res = hosts.all.zos_find(patterns=['TEST.PS.*'], size="1k") for val in find_res.contacted.values(): assert len(val.get('data_sets')) == 2 @@ -236,7 +236,7 @@ def test_find_data_sets_smaller_than_size(ansible_zos_module): hosts = ansible_zos_module TEST_PS = 'USER.FIND.TEST' try: - hosts.all.zos_data_set(name=TEST_PS, state="present", type="SEQ", size="1k") + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="k", space_primary=1) find_res = hosts.all.zos_find(patterns=['USER.FIND.*'], size='-1m') for val in find_res.contacted.values(): assert len(val.get('data_sets')) == 1 @@ -344,10 +344,10 @@ def test_find_mixed_members_from_pds_paths(ansible_zos_module): batch=[dict(name=i, type='pds', state='present') for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(MEMBER)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + "(MEMBER)", type="member") for i in PDS_NAMES] ) hosts.all.zos_data_set( - batch=[dict(name=i + "(FILE)", type="MEMBER") for i in PDS_NAMES] + batch=[dict(name=i + "(FILE)", type="member") for i in PDS_NAMES] ) find_res = hosts.all.zos_find( pds_paths=['TEST.NONE.PDS.*','TEST.FIND.PDS.FUNCTEST.*'], excludes=['.*FILE$'], patterns=['.*'] diff --git a/tests/functional/modules/test_zos_job_output_func.py b/tests/functional/modules/test_zos_job_output_func.py index 
584cd6d6d..e92d377d4 100644 --- a/tests/functional/modules/test_zos_job_output_func.py +++ b/tests/functional/modules/test_zos_job_output_func.py @@ -99,7 +99,7 @@ def test_zos_job_output_job_exists(ansible_zos_module): ) jobs = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_PATH), location="USS", volume=None + src="{0}/SAMPLE".format(TEMP_PATH), location="uss", volume=None ) for job in jobs.contacted.values(): print(job) @@ -127,7 +127,7 @@ def test_zos_job_output_job_exists_with_filtered_ddname(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) result = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_PATH), location="USS", volume=None + src="{0}/SAMPLE".format(TEMP_PATH), location="uss", volume=None ) hosts.all.file(path=TEMP_PATH, state="absent") dd_name = "JESMSGLG" diff --git a/tests/functional/modules/test_zos_job_query_func.py b/tests/functional/modules/test_zos_job_query_func.py index 8f6c6e072..11680ab57 100644 --- a/tests/functional/modules/test_zos_job_query_func.py +++ b/tests/functional/modules/test_zos_job_query_func.py @@ -57,13 +57,13 @@ def test_zos_job_id_query_multi_wildcards_func(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCLQ_FILE_CONTENTS), TEMP_PATH) ) hosts.all.zos_data_set( - name=JDATA_SET_NAME, state="present", type="PDS", replace=True + name=JDATA_SET_NAME, state="present", type="pds", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, JDATA_SET_NAME) ) results = hosts.all.zos_job_submit( - src="{0}(SAMPLE)".format(JDATA_SET_NAME), location="DATA_SET", wait_time_s=10 + src="{0}(SAMPLE)".format(JDATA_SET_NAME), location="data_set", wait_time_s=10 ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -90,13 +90,13 @@ def test_zos_job_name_query_multi_wildcards_func(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCLQ_FILE_CONTENTS), TEMP_PATH) 
) hosts.all.zos_data_set( - name=NDATA_SET_NAME, state="present", type="PDS", replace=True + name=NDATA_SET_NAME, state="present", type="pds", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, NDATA_SET_NAME) ) results = hosts.all.zos_job_submit( - src="{0}(SAMPLE)".format(NDATA_SET_NAME), location="DATA_SET", wait_time_s=10 + src="{0}(SAMPLE)".format(NDATA_SET_NAME), location="data_set", wait_time_s=10 ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" diff --git a/tests/functional/modules/test_zos_job_submit_func.py b/tests/functional/modules/test_zos_job_submit_func.py index c148b6223..f2f1582fa 100644 --- a/tests/functional/modules/test_zos_job_submit_func.py +++ b/tests/functional/modules/test_zos_job_submit_func.py @@ -411,8 +411,8 @@ def test_job_submit_PDS(ansible_zos_module, location): """ Test zos_job_submit with a PDS(MEMBER), also test the default value for 'location', ensure it works with and without the - value "DATA_SET". If default_location is True, then don't - pass a 'location:DATA_SET' allow its default to come through. + value "data_set". If default_location is True, then don't + pass a 'location:data_set' allow its default to come through. 
""" try: results = None @@ -424,7 +424,7 @@ def test_job_submit_PDS(ansible_zos_module, location): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -436,7 +436,7 @@ def test_job_submit_PDS(ansible_zos_module, location): ) else: results = hosts.all.zos_job_submit( - src="{0}(SAMPLE)".format(data_set_name), location="DATA_SET", wait_time_s=30 + src="{0}(SAMPLE)".format(data_set_name), location="data_set", wait_time_s=30 ) for result in results.contacted.values(): @@ -456,7 +456,7 @@ def test_job_submit_PDS_special_characters(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) hosts.all.zos_data_set( - name=DATA_SET_NAME_SPECIAL_CHARS, state="present", type="PDS", replace=True + name=DATA_SET_NAME_SPECIAL_CHARS, state="present", type="pds", replace=True ) hosts.all.shell( cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format( @@ -465,7 +465,7 @@ def test_job_submit_PDS_special_characters(ansible_zos_module): ) results = hosts.all.zos_job_submit( src="{0}(SAMPLE)".format(DATA_SET_NAME_SPECIAL_CHARS), - location="DATA_SET", + location="data_set", ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -484,7 +484,7 @@ def test_job_submit_USS(ansible_zos_module): cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH) ) results = hosts.all.zos_job_submit( - src="{0}/SAMPLE".format(TEMP_PATH), location="USS", volume=None + src="{0}/SAMPLE".format(TEMP_PATH), location="uss", volume=None ) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -499,7 +499,7 @@ def test_job_submit_LOCAL(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL", 
wait_time_s=10) + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local", wait_time_s=10) for result in results.contacted.values(): print(result) @@ -513,7 +513,7 @@ def test_job_submit_LOCAL_extraR(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_BACKSLASH_R) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL", wait_time_s=10) + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local", wait_time_s=10) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -526,7 +526,7 @@ def test_job_submit_LOCAL_BADJCL(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_BAD) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL", wait_time_s=10) + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local", wait_time_s=10) for result in results.contacted.values(): # Expecting: The job completion code (CC) was not in the job log....." 
@@ -547,7 +547,7 @@ def test_job_submit_PDS_volume(ansible_zos_module, volumes_on_systems): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True, volumes=volume_1 + name=data_set_name, state="present", type="pds", replace=True, volumes=volume_1 ) hosts.all.shell( @@ -555,10 +555,10 @@ def test_job_submit_PDS_volume(ansible_zos_module, volumes_on_systems): ) hosts.all.zos_data_set( - name=data_set_name, state="uncataloged", type="PDS" + name=data_set_name, state="uncataloged", type="pds" ) - results = hosts.all.zos_job_submit(src=data_set_name+"(SAMPLE)", location="DATA_SET", volume=volume_1) + results = hosts.all.zos_job_submit(src=data_set_name+"(SAMPLE)", location="data_set", volume=volume_1) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" assert result.get("jobs")[0].get("ret_code").get("code") == 0 @@ -580,7 +580,7 @@ def test_job_submit_PDS_5_SEC_JOB_WAIT_15(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -589,7 +589,7 @@ def test_job_submit_PDS_5_SEC_JOB_WAIT_15(ansible_zos_module): hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=data_set_name+"(BPXSLEEP)", - location="DATA_SET", wait_time_s=wait_time_s) + location="data_set", wait_time_s=wait_time_s) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -613,7 +613,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_60(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -622,7 +622,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_60(ansible_zos_module): hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=data_set_name+"(BPXSLEEP)", - 
location="DATA_SET", wait_time_s=wait_time_s) + location="data_set", wait_time_s=wait_time_s) for result in results.contacted.values(): assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000" @@ -646,7 +646,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_10_negative(ansible_zos_module): ) hosts.all.zos_data_set( - name=data_set_name, state="present", type="PDS", replace=True + name=data_set_name, state="present", type="pds", replace=True ) hosts.all.shell( @@ -655,7 +655,7 @@ def test_job_submit_PDS_30_SEC_JOB_WAIT_10_negative(ansible_zos_module): hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=data_set_name+"(BPXSLEEP)", - location="DATA_SET", wait_time_s=wait_time_s) + location="data_set", wait_time_s=wait_time_s) for result in results.contacted.values(): assert result.get("msg") is not None @@ -682,7 +682,7 @@ def test_job_submit_max_rc(ansible_zos_module, args): f.write(JCL_FILE_CONTENTS_RC_8) results = hosts.all.zos_job_submit( - src=tmp_file.name, location="LOCAL", max_rc=args["max_rc"], wait_time_s=args["wait_time_s"] + src=tmp_file.name, location="local", max_rc=args["max_rc"], wait_time_s=args["wait_time_s"] ) for result in results.contacted.values(): @@ -771,7 +771,7 @@ def test_job_submit_jinja_template(ansible_zos_module, args): results = hosts.all.zos_job_submit( src=tmp_file.name, - location="LOCAL", + location="local", use_template=True, template_parameters=args["options"] ) @@ -794,7 +794,7 @@ def test_job_submit_full_input(ansible_zos_module): ) results = hosts.all.zos_job_submit( src="{0}/SAMPLE".format(TEMP_PATH), - location="USS", + location="uss", volume=None, # This job used to set wait=True, but since it has been deprecated # and removed, it now waits up to 30 seconds. 
@@ -814,7 +814,7 @@ def test_negative_job_submit_local_jcl_no_dsn(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_NO_DSN) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, wait_time_s=20, location="LOCAL") + results = hosts.all.zos_job_submit(src=tmp_file.name, wait_time_s=20, location="local") import pprint for result in results.contacted.values(): assert result.get("changed") is False @@ -827,7 +827,7 @@ def test_negative_job_submit_local_jcl_invalid_user(ansible_zos_module): with open(tmp_file.name, "w") as f: f.write(JCL_FILE_CONTENTS_INVALID_USER) hosts = ansible_zos_module - results = hosts.all.zos_job_submit(src=tmp_file.name, location="LOCAL") + results = hosts.all.zos_job_submit(src=tmp_file.name, location="local") for result in results.contacted.values(): assert result.get("changed") is False @@ -843,7 +843,7 @@ def test_job_submit_local_jcl_typrun_scan(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_SCAN) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=20, encoding={ "from": "UTF-8", @@ -864,7 +864,7 @@ def test_job_submit_local_jcl_typrun_copy(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_COPY) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=20, encoding={ "from": "UTF-8", @@ -887,7 +887,7 @@ def test_job_submit_local_jcl_typrun_hold(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_HOLD) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=20, encoding={ "from": "UTF-8", @@ -908,7 +908,7 @@ def test_job_submit_local_jcl_typrun_jclhold(ansible_zos_module): f.write(JCL_FILE_CONTENTS_TYPRUN_JCLHOLD) hosts = ansible_zos_module results = hosts.all.zos_job_submit(src=tmp_file.name, - location="LOCAL", + location="local", 
wait_time_s=20, encoding={ "from": "UTF-8", @@ -946,7 +946,7 @@ def test_zoau_bugfix_invalid_utf8_chars(ansible_zos_module): results = hosts.all.zos_job_submit( src=tmp_file.name, - location="LOCAL", + location="local", wait_time_s=15 ) diff --git a/tests/functional/modules/test_zos_lineinfile_func.py b/tests/functional/modules/test_zos_lineinfile_func.py index 445c0edfe..cd1421f41 100644 --- a/tests/functional/modules/test_zos_lineinfile_func.py +++ b/tests/functional/modules/test_zos_lineinfile_func.py @@ -224,7 +224,7 @@ def set_ds_environment(ansible_zos_module, TEMP_FILE, DS_NAME, DS_TYPE, CONTENT) hosts = ansible_zos_module hosts.all.shell(cmd="echo \"{0}\" > {1}".format(CONTENT, TEMP_FILE)) hosts.all.zos_data_set(name=DS_NAME, type=DS_TYPE) - if DS_TYPE in ["PDS", "PDSE"]: + if DS_TYPE in ["pds", "pdse"]: DS_FULL_NAME = DS_NAME + "(MEM)" hosts.all.zos_data_set(name=DS_FULL_NAME, state="present", type="member") cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), DS_FULL_NAME) @@ -238,10 +238,11 @@ def set_ds_environment(ansible_zos_module, TEMP_FILE, DS_NAME, DS_TYPE, CONTENT) def remove_ds_environment(ansible_zos_module, DS_NAME): hosts = ansible_zos_module hosts.all.zos_data_set(name=DS_NAME, state="absent") + # supported data set types -DS_TYPE = ['SEQ', 'PDS', 'PDSE'] +DS_TYPE = ['seq', 'pds', 'pdse'] # not supported data set types -NS_DS_TYPE = ['ESDS', 'RRDS', 'LDS'] +NS_DS_TYPE = ['esds', 'rrds', 'lds'] # The encoding will be only use on a few test ENCODING = [ 'ISO8859-1', 'UTF-8'] @@ -793,7 +794,7 @@ def test_ds_line_absent(ansible_zos_module, dstype): def test_ds_tmp_hlq_option(ansible_zos_module): # This TMPHLQ only works with sequential datasets hosts = ansible_zos_module - ds_type = "SEQ" + ds_type = "seq" kwargs = dict(backup_name=r"TMPHLQ\..") params = dict(insertafter="EOF", line="export ZOAU_ROOT", state="present", backup=True, tmp_hlq="TMPHLQ") content = TEST_CONTENT @@ -848,7 +849,7 @@ def test_ds_line_force(ansible_zos_module, 
dstype): MEMBER_1, MEMBER_2 = "MEM1", "MEM2" TEMP_FILE = "/tmp/{0}".format(MEMBER_2) content = TEST_CONTENT - if ds_type == "SEQ": + if ds_type == "seq": params["path"] = default_data_set_name+".{0}".format(MEMBER_2) else: params["path"] = default_data_set_name+"({0})".format(MEMBER_2) @@ -865,7 +866,7 @@ def test_ds_line_force(ansible_zos_module, dstype): ] ) # write memeber to verify cases - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(TEMP_FILE), params["path"]) else: cmdStr = "cp {0} \"//'{1}'\" ".format(quote(TEMP_FILE), params["path"]) @@ -900,7 +901,7 @@ def test_ds_line_force(ansible_zos_module, dstype): @pytest.mark.ds -@pytest.mark.parametrize("dstype", ["PDS","PDSE"]) +@pytest.mark.parametrize("dstype", ["pds","pdse"]) def test_ds_line_force_fail(ansible_zos_module, dstype): hosts = ansible_zos_module ds_type = dstype @@ -1022,7 +1023,7 @@ def test_ds_encoding(ansible_zos_module, encoding, dstype): hosts.all.shell(cmd="echo \"{0}\" > {1}".format(content, temp_file)) hosts.all.shell(cmd=f"iconv -f IBM-1047 -t {params['encoding']} temp_file > temp_file ") hosts.all.zos_data_set(name=ds_name, type=ds_type) - if ds_type in ["PDS", "PDSE"]: + if ds_type in ["pds", "pdse"]: ds_full_name = ds_name + "(MEM)" hosts.all.zos_data_set(name=ds_full_name, state="present", type="member") cmdStr = "cp -CM {0} \"//'{1}'\"".format(quote(temp_file), ds_full_name) diff --git a/tests/functional/modules/test_zos_mount_func.py b/tests/functional/modules/test_zos_mount_func.py index 1ec7c03f5..39fdd26dd 100644 --- a/tests/functional/modules/test_zos_mount_func.py +++ b/tests/functional/modules/test_zos_mount_func.py @@ -89,7 +89,7 @@ def test_basic_mount(ansible_zos_module, volumes_on_systems): srcfn = create_sourcefile(hosts, volume_1) try: mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="mounted" ) for result in 
mount_result.values(): assert result.get("rc") == 0 @@ -99,7 +99,7 @@ def test_basic_mount(ansible_zos_module, volumes_on_systems): hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path="/pythonx/", state="absent") @@ -112,10 +112,10 @@ def test_double_mount(ansible_zos_module, volumes_on_systems): volume_1 = volumes.get_available_vol() srcfn = create_sourcefile(hosts, volume_1) try: - hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted") + hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="zfs", state="mounted") # The duplication here is intentional... want to make sure it is seen mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="mounted" ) for result in mount_result.values(): assert result.get("rc") == 0 @@ -125,7 +125,7 @@ def test_double_mount(ansible_zos_module, volumes_on_systems): hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path="/pythonx/", state="absent") @@ -137,9 +137,9 @@ def test_remount(ansible_zos_module, volumes_on_systems): volume_1 = volumes.get_available_vol() srcfn = create_sourcefile(hosts, volume_1) try: - hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted") + hosts.all.zos_mount(src=srcfn, path="/pythonx", fs_type="zfs", state="mounted") mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="remounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="remounted" ) for result in mount_result.values(): assert result.get("rc") == 0 @@ -148,7 +148,7 @@ def test_remount(ansible_zos_module, volumes_on_systems): hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path="/pythonx/", state="absent") @@ -180,7 +180,7 @@ def 
test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ name=dest, type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -196,7 +196,7 @@ def test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ mount_result = hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="mounted", persistent=dict(data_store=dest_path), ) @@ -209,7 +209,7 @@ def test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path=tmp_file_filename, state="absent") @@ -219,7 +219,7 @@ def test_basic_mount_with_bpx_nocomment_nobackup(ansible_zos_module, volumes_on_ state="absent", type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -264,7 +264,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst name=dest, type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -283,7 +283,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst mount_result = hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="mounted", persistent=dict( data_store=dest_path, @@ -326,7 +326,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", ) hosts.all.file(path=tmp_file_filename, state="absent") @@ -337,7 +337,7 @@ def test_basic_mount_with_bpx_comment_backup(ansible_zos_module, volumes_on_syst state="absent", type="pdse", space_primary=5, - space_type="M", + space_type="m", record_format="fba", record_length=80, ) @@ -349,7 +349,7 @@ def test_basic_mount_with_tmp_hlq_option(ansible_zos_module, volumes_on_systems) srcfn = 
create_sourcefile(hosts, volume_1) try: mount_result = hosts.all.zos_mount( - src=srcfn, path="/pythonx", fs_type="ZFS", state="mounted" + src=srcfn, path="/pythonx", fs_type="zfs", state="mounted" ) for result in mount_result.values(): assert result.get("rc") == 0 @@ -358,11 +358,11 @@ def test_basic_mount_with_tmp_hlq_option(ansible_zos_module, volumes_on_systems) finally: tmphlq = "TMPHLQ" persist_data_set = get_tmp_ds_name() - hosts.all.zos_data_set(name=persist_data_set, state="present", type="SEQ") + hosts.all.zos_data_set(name=persist_data_set, state="present", type="seq") unmount_result = hosts.all.zos_mount( src=srcfn, path="/pythonx", - fs_type="ZFS", + fs_type="zfs", state="absent", tmp_hlq=tmphlq, persistent=dict(data_store=persist_data_set, backup=True) diff --git a/tests/functional/modules/test_zos_mvs_raw_func.py b/tests/functional/modules/test_zos_mvs_raw_func.py index ca5b6384d..cbddd4419 100644 --- a/tests/functional/modules/test_zos_mvs_raw_func.py +++ b/tests/functional/modules/test_zos_mvs_raw_func.py @@ -62,7 +62,7 @@ def test_disposition_new(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -86,7 +86,7 @@ def test_dispositions_for_existing_data_set(ansible_zos_module, disposition): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -118,7 +118,7 @@ def test_list_cat_for_existing_data_set_with_tmp_hlq_option(ansible_zos_module, default_volume = volumes.get_available_vol() default_data_set = get_tmp_ds_name()[:25] hosts.all.zos_data_set( - name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( 
program_name="idcams", @@ -133,12 +133,12 @@ def test_list_cat_for_existing_data_set_with_tmp_hlq_option(ansible_zos_module, return_content=dict(type="text"), replace=True, backup=True, - type="SEQ", + type="seq", space_primary=5, space_secondary=1, - space_type="M", + space_type="m", volumes=default_volume, - record_format="FB" + record_format="fb" ), ), dict(dd_input=dict(dd_name=SYSIN_DD, content=IDCAMS_STDIN)), @@ -172,7 +172,7 @@ def test_new_disposition_for_data_set_members(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=DEFAULT_DATA_SET_WITH_MEMBER, disposition="new", - type="PDS", + type="pds", directory_blocks=15, return_content=dict(type="text"), ), @@ -197,7 +197,7 @@ def test_dispositions_for_existing_data_set_members(ansible_zos_module, disposit default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_WITH_MEMBER = default_data_set + '(MEM)' hosts.all.zos_data_set( - name=default_data_set, type="PDS", state="present", replace=True + name=default_data_set, type="pds", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -234,7 +234,7 @@ def test_normal_dispositions_data_set(ansible_zos_module, normal_disposition, ch default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="SEQ", + type="seq", state="present", replace=True, volumes=[volume_1], @@ -267,11 +267,11 @@ def test_normal_dispositions_data_set(ansible_zos_module, normal_disposition, ch @pytest.mark.parametrize( "space_type,primary,secondary,expected", [ - ("TRK", 3, 1, 169992), - ("CYL", 3, 1, 2549880), - ("B", 3, 1, 56664), - ("K", 3, 1, 56664), - ("M", 3, 1, 3003192), + ("trk", 3, 1, 169992), + ("cyl", 3, 1, 2549880), + ("b", 3, 1, 56664), + ("k", 3, 1, 56664), + ("m", 3, 1, 3003192), ], ) def test_space_types(ansible_zos_module, space_type, primary, secondary, expected): @@ -288,7 +288,7 @@ def test_space_types(ansible_zos_module, space_type, primary, secondary, expecte dd_name=SYSPRINT_DD, 
data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", space_primary=primary, space_secondary=secondary, space_type=space_type, @@ -315,7 +315,7 @@ def test_space_types(ansible_zos_module, space_type, primary, secondary, expecte @pytest.mark.parametrize( "data_set_type", - ["PDS", "PDSE", "LARGE", "BASIC", "SEQ"], + ["pds", "pdse", "large", "basic", "seq"], ) def test_data_set_types_non_vsam(ansible_zos_module, data_set_type, volumes_on_systems): try: @@ -351,7 +351,7 @@ def test_data_set_types_non_vsam(ansible_zos_module, data_set_type, volumes_on_s @pytest.mark.parametrize( "data_set_type", - ["KSDS", "RRDS", "LDS", "ESDS"], + ["ksds", "rrds", "lds", "esds"], ) def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_systems): try: @@ -374,7 +374,7 @@ def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_syste volumes=[volume_1], ), ) - if data_set_type != "KSDS" + if data_set_type != "ksds" else dict( dd_data_set=dict( dd_name=SYSPRINT_DD, @@ -400,7 +400,7 @@ def test_data_set_types_vsam(ansible_zos_module, data_set_type, volumes_on_syste @pytest.mark.parametrize( "record_format", - ["U", "VB", "VBA", "FB", "FBA"], + ["u", "vb", "vba", "fb", "fba"], ) def test_record_formats(ansible_zos_module, record_format, volumes_on_systems): try: @@ -453,7 +453,7 @@ def test_return_content_type(ansible_zos_module, return_content_type, expected, default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="SEQ", + type="seq", state="present", replace=True, volumes=[volume_1], @@ -505,7 +505,7 @@ def test_return_text_content_encodings( default_data_set = get_tmp_ds_name() results = hosts.all.zos_data_set( name=default_data_set, - type="SEQ", + type="seq", state="present", replace=True, volumes=[volume_1], @@ -544,7 +544,7 @@ def test_reuse_existing_data_set(ansible_zos_module): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - 
name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="IDCAMS", @@ -555,7 +555,7 @@ def test_reuse_existing_data_set(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", reuse=True, return_content=dict(type="text"), ), @@ -577,7 +577,7 @@ def test_replace_existing_data_set(ansible_zos_module): hosts = ansible_zos_module default_data_set = get_tmp_ds_name() hosts.all.zos_data_set( - name=default_data_set, type="SEQ", state="present", replace=True + name=default_data_set, type="seq", state="present", replace=True ) results = hosts.all.zos_mvs_raw( program_name="IDCAMS", @@ -588,7 +588,7 @@ def test_replace_existing_data_set(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", replace=True, return_content=dict(type="text"), ), @@ -619,7 +619,7 @@ def test_replace_existing_data_set_make_backup(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", replace=True, return_content=dict(type="text"), ), @@ -636,7 +636,7 @@ def test_replace_existing_data_set_make_backup(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", replace=True, backup=True, return_content=dict(type="text"), @@ -687,7 +687,7 @@ def test_input_empty(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -719,7 +719,7 @@ def test_input_large(ansible_zos_module): dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -752,7 +752,7 @@ def test_input_provided_as_list(ansible_zos_module): dd_name=SYSPRINT_DD, 
data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ), ), @@ -792,7 +792,7 @@ def test_input_return_content_types(ansible_zos_module, return_content_type, exp dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", ), ), dict( @@ -844,7 +844,7 @@ def test_input_return_text_content_encodings( dd_name=SYSPRINT_DD, data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", ), ), dict( @@ -1164,7 +1164,7 @@ def test_file_record_length(ansible_zos_module, record_length): @pytest.mark.parametrize( "record_format", - ["U", "VB", "VBA", "FB", "FBA"], + ["u", "vb", "vba", "fb", "fba"], ) def test_file_record_format(ansible_zos_module, record_format): try: @@ -1353,7 +1353,7 @@ def test_concatenation_with_data_set_dd_and_response(ansible_zos_module): dd_data_set=dict( data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", return_content=dict(type="text"), ) ), @@ -1361,7 +1361,7 @@ def test_concatenation_with_data_set_dd_and_response(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", ) ), ], @@ -1391,8 +1391,8 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu hosts = ansible_zos_module default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_2 = get_tmp_ds_name() - hosts.all.zos_data_set(name=default_data_set, state="present", type="SEQ") - hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="present", type="SEQ") + hosts.all.zos_data_set(name=default_data_set, state="present", type="seq") + hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="present", type="seq") results = hosts.all.zos_mvs_raw( program_name="idcams", auth=True, @@ -1405,7 +1405,7 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu dd_data_set=dict( data_set_name=default_data_set, disposition="new", - type="SEQ", + type="seq", 
replace=True, backup=True, return_content=dict(type="text"), @@ -1415,7 +1415,7 @@ def test_concatenation_with_data_set_dd_with_replace_and_backup(ansible_zos_modu dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", replace=True, backup=True, ) @@ -1462,7 +1462,7 @@ def test_concatenation_with_data_set_member(ansible_zos_module): default_data_set = get_tmp_ds_name() DEFAULT_DATA_SET_2 = get_tmp_ds_name() DEFAULT_DATA_SET_WITH_MEMBER = default_data_set + '(MEM)' - hosts.all.zos_data_set(name=default_data_set, state="present", type="PDS") + hosts.all.zos_data_set(name=default_data_set, state="present", type="pds") hosts.all.zos_data_set(name=DEFAULT_DATA_SET_2, state="absent") results = hosts.all.zos_mvs_raw( program_name="idcams", @@ -1482,7 +1482,7 @@ def test_concatenation_with_data_set_member(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", ) ), ], @@ -1538,7 +1538,7 @@ def test_concatenation_with_unix_dd_and_response_datasets(ansible_zos_module): dd_data_set=dict( data_set_name=DEFAULT_DATA_SET_2, disposition="new", - type="SEQ", + type="seq", ) ), ], @@ -1766,7 +1766,7 @@ def test_concatenation_all_dd_types(ansible_zos_module, dds, input_pos, input_co try: hosts = ansible_zos_module default_data_set = "ANSIBLE.USER.PRIVATE.TEST" - hosts.all.zos_data_set(name=default_data_set, state="present", type="SEQ") + hosts.all.zos_data_set(name=default_data_set, state="present", type="seq") hosts.all.file(path=DEFAULT_PATH, state="directory") hosts.all.file(path=DEFAULT_PATH_WITH_FILE, state="absent") results = hosts.all.zos_mvs_raw(program_name="idcams", auth=True, dds=dds) diff --git a/tests/functional/modules/test_zos_unarchive_func.py b/tests/functional/modules/test_zos_unarchive_func.py index 28cc0d77d..790f5b3ef 100644 --- a/tests/functional/modules/test_zos_unarchive_func.py +++ b/tests/functional/modules/test_zos_unarchive_func.py @@ -353,16 
+353,16 @@ def test_uss_unarchive_copy_to_remote(ansible_zos_module): ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB",], + "record_format", ["fb", "vb",], ) def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -382,7 +382,7 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec replace=True ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{DATASET}({member})", @@ -392,7 +392,7 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -405,13 +405,13 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec format_dict = dict(name=format) if format == "terse": - format_dict["format_options"] = dict(terse_pack="SPACK") + format_dict["format_options"] = dict(terse_pack="spack") archive_result = hosts.all.zos_archive( src=DATASET, dest=MVS_DEST_ARCHIVE, format=format_dict, dest_data_set=dict(name=DATASET, - type="SEQ", + type="seq", record_format=record_format, record_length=record_length), ) @@ -464,16 +464,16 @@ def test_mvs_unarchive_single_data_set(ansible_zos_module, format, data_set, rec ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", 
members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB",], + "record_format", ["fb", "vb",], ) def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -493,7 +493,7 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d replace=True ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{DATASET}({member})", @@ -503,7 +503,7 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -517,7 +517,7 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d format_dict = dict(name=format) format_dict["format_options"] = dict(use_adrdssu=True) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") archive_result = hosts.all.zos_archive( src=DATASET, dest=MVS_DEST_ARCHIVE, @@ -564,9 +564,9 @@ def test_mvs_unarchive_single_data_set_use_adrdssu(ansible_zos_module, format, d ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, data_set): @@ -580,7 
+580,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, n=1, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -598,10 +598,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -640,9 +640,9 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu(ansible_zos_module, format, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, format, data_set): @@ -656,7 +656,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, n=2, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -674,10 +674,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + 
src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -726,9 +726,9 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_include(ansible_zos_module, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, format, data_set): @@ -742,7 +742,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, n=2, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -760,10 +760,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -808,9 +808,9 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_exclude(ansible_zos_module, ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_set): @@ -824,7 +824,7 @@ def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_s n=2, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: 
target_member_list.extend( @@ -842,10 +842,10 @@ def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_s format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) archive_result = hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -885,9 +885,9 @@ def test_mvs_unarchive_multiple_data_set_list(ansible_zos_module, format, data_s ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ"), - dict(dstype="PDS"), - dict(dstype="PDSE"), + dict(dstype="seq"), + dict(dstype="pds"), + dict(dstype="pdse"), ] ) @pytest.mark.parametrize( @@ -911,7 +911,7 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_force(ansible_zos_module, f n=1, type=data_set.get("dstype")) ds_to_write = target_ds_list - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: target_member_list = [] for ds in target_ds_list: target_member_list.extend( @@ -929,10 +929,10 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_force(ansible_zos_module, f format_dict = dict(name=format, format_options=dict()) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") format_dict["format_options"].update(use_adrdssu=True) hosts.all.zos_archive( - src=""" "{0}*" """.format(DATASET), + src="{0}*".format(DATASET), dest=MVS_DEST_ARCHIVE, format=format_dict, ) @@ -974,16 +974,16 @@ def test_mvs_unarchive_multiple_data_set_use_adrdssu_force(ansible_zos_module, f ]) @pytest.mark.parametrize( "data_set", [ - dict(dstype="SEQ", members=[""]), - dict(dstype="PDS", members=["MEM1", "MEM2"]), - dict(dstype="PDSE", members=["MEM1", "MEM2"]), + dict(dstype="seq", members=[""]), + 
dict(dstype="pds", members=["MEM1", "MEM2"]), + dict(dstype="pdse", members=["MEM1", "MEM2"]), ] ) @pytest.mark.parametrize( "record_length", [80, 120] ) @pytest.mark.parametrize( - "record_format", ["FB", "VB",], + "record_format", ["fb", "vb",], ) def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, data_set, record_length, record_format): try: @@ -1004,7 +1004,7 @@ def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, da record_format=record_format, ) # Create members if needed - if data_set.get("dstype") in ["PDS", "PDSE"]: + if data_set.get("dstype") in ["pds", "pdse"]: for member in data_set.get("members"): hosts.all.zos_data_set( name=f"{DATASET}({member})", @@ -1013,7 +1013,7 @@ def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, da ) # Write some content into src the same size of the record, # need to reduce 4 from V and VB due to RDW - if record_format in ["V", "VB"]: + if record_format in ["v", "vb"]: test_line = "a" * (record_length - 4) else: test_line = "a" * record_length @@ -1027,7 +1027,7 @@ def test_mvs_unarchive_single_data_set_remote_src(ansible_zos_module, format, da format_dict = dict(name=format) format_dict["format_options"] = dict(use_adrdssu=True) if format == "terse": - format_dict["format_options"].update(terse_pack="SPACK") + format_dict["format_options"].update(terse_pack="spack") archive_result = hosts.all.zos_archive( src=DATASET, dest=MVS_DEST_ARCHIVE, diff --git a/tests/unit/test_zos_backup_restore_unit.py b/tests/unit/test_zos_backup_restore_unit.py index a751a7599..5920febdb 100644 --- a/tests/unit/test_zos_backup_restore_unit.py +++ b/tests/unit/test_zos_backup_restore_unit.py @@ -93,7 +93,7 @@ def assert_args_invalid(zos_backup_restore, arguments): @pytest.mark.parametrize( - "space_type", ["K", "M", "G", "TRK", "CYL", "k", "m", "g", "trk", "cyl"] + "space_type", ["k", "m", "g", "trk", "cyl"] ) def test_valid_space_types(zos_backup_restore_mocker, 
space_type): valid_args = dict( diff --git a/tests/unit/test_zos_mvs_raw_unit.py b/tests/unit/test_zos_mvs_raw_unit.py index f528412da..e50734756 100644 --- a/tests/unit/test_zos_mvs_raw_unit.py +++ b/tests/unit/test_zos_mvs_raw_unit.py @@ -59,7 +59,7 @@ def run_command(self, *args, **kwargs): "new", "keep", "keep", - "CYL", + "cyl", 5, 1, "smsclas1", @@ -67,17 +67,17 @@ def run_command(self, *args, **kwargs): "smsclas1", 80, "SOMEKEYLAB100", - "LIBRARY", + "library", {"label": "keyforme", "encoding": "h"}, {"label": "keyforme2", "encoding": "h"}, - "U", + "u", ), ( "data.set.name(mem1)", "shr", "delete", "keep", - "TRK", + "trk", "5", 1, "smsclas1", @@ -85,17 +85,17 @@ def run_command(self, *args, **kwargs): "smsclas3", 120, "somekeylab1", - "BASIC", + "basic", {"label": "keyforme", "encoding": "l"}, {"label": "keyforme2", "encoding": "h"}, - "FB", + "fb", ), ( "DATA.NAME.HERE.NOW", "old", "catalog", "uncatalog", - "B", + "b", 55, "100", "SMSCLASS", @@ -103,17 +103,17 @@ def run_command(self, *args, **kwargs): "smscD@s3", 120, "keyfor342fdsme", - "LARGE", + "large", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "FBA", + "fba", ), ( "DAT@.now", "mod", "delete", "uncatalog", - "G", + "g", 1, "9", "SMSCLASS", @@ -121,17 +121,17 @@ def run_command(self, *args, **kwargs): "", 120, "keyfor342fdsme", - "PDSE", + "pdse", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "VB", + "vb", ), ( "DAT$.now", "new", "delete", "keep", - "M", + "m", 1, 9, "SMSCLASS", @@ -139,10 +139,10 @@ def run_command(self, *args, **kwargs): "", 0, "", - "LDS", + "lds", {"label": "keyforME", "encoding": "l"}, {"label": "keyyyyy343asdfasfsdfa", "encoding": "l"}, - "VBA", + "vba", ), ], ) @@ -237,7 +237,7 @@ def test_argument_parsing_data_set( "delete", 0, 100, - "FB", + "fb", "record", "r", ["ocreat", "oappend", "onoctty"], @@ -248,14 +248,14 @@ def test_argument_parsing_data_set( "delete", 200, "100", - "FBA", + "fba", "record", "w", 
["oappend", "osync"], ), - ("/u/OEUSR01", "keep", "delete", 0, 100, "VB", "binary", "rw", ["ononblock"]), - ("/u/testmeee", "keep", "delete", 0, 100, "VBA", "record", "read_only", []), - ("/u/hellow/d/or4ld", "keep", "keep", 0, 100, "U", "text", "write_only", []), + ("/u/OEUSR01", "keep", "delete", 0, 100, "vb", "binary", "rw", ["ononblock"]), + ("/u/testmeee", "keep", "delete", 0, 100, "vba", "record", "read_only", []), + ("/u/hellow/d/or4ld", "keep", "keep", 0, 100, "u", "text", "write_only", []), ], ) def test_argument_parsing_unix( @@ -338,7 +338,7 @@ def test_argument_parsing_unix( "old", "keep", "keep", - "CYL", + "cyl", 5, 1, "smsclas1", @@ -346,17 +346,17 @@ def test_argument_parsing_unix( "smsclas1", 80, "SOMEKEYLAB100", - "LIBRARY", + "library", {"label": "keyforme", "encoding": "h"}, {"label": "keyforme2", "encoding": "h"}, - "U", + "u", ), ( "data.set.name(mem1waytoolong)", "excl", "delete", "keep", - "TRK", + "trk", "5", 1, "smsclas1", @@ -364,10 +364,10 @@ def test_argument_parsing_unix( "smsclas3", 120, "somekeylab1", - "BASIC", + "basic", {"label": "keyforme", "encoding": "l"}, {"label": "keyforme2", "encoding": "h"}, - "FB", + "fb", ), ( "DATA.NAME.HERE.NOW", @@ -382,17 +382,17 @@ def test_argument_parsing_unix( "smscD@s3", 120, "keyfor342fdsme", - "LARGE", + "large", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "FBA", + "fba", ), ( "DAT@.now", "mod", "delete", "uncatalog", - "G", + "g", 1, "9", "SMSCLASSsss", @@ -400,17 +400,17 @@ def test_argument_parsing_unix( "", 120, "keyfor342fdsme", - "PDSE", + "pdse", {"label": "keyforME", "encoding": "l"}, {"label": "KEY4me", "encoding": "h"}, - "VB", + "vb", ), ( "DAT$.now", "new", "delete", "meep", - "M", + "m", 1, 9, "SMSCLASS", @@ -418,10 +418,10 @@ def test_argument_parsing_unix( "", 0, "", - "KSDSS", + "ksdss", {"label": "keyforME", "encoding": "l"}, {"label": "keyyyyy343asdfasfsdfa", "encoding": "l"}, - "VBA", + "vba", ), ], ) @@ -525,7 +525,7 @@ def 
test_argument_parsing_data_set_failure_path( "delete", 200, "100", - "FBA", + "fba", "record", "w", ["append", "osync"], @@ -537,12 +537,12 @@ def test_argument_parsing_data_set_failure_path( "delete", 0, 100, - "VBA", + "vba", "record", "read_only", ["hello"], ), - ("/u/hellow/d/or4ld", "meep", "keep", 0, 100, "U", "text", None, []), + ("/u/hellow/d/or4ld", "meep", "keep", 0, 100, "u", "text", None, []), ], ) def test_argument_parsing_unix_failure_path( @@ -620,7 +620,7 @@ def test_ksds_defaults( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "KSDS", + "type": "ksds", } }, ], @@ -663,7 +663,7 @@ def test_ksds_exception_key_length( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "ESDS", + "type": "esds", "key_length": 5, } }, @@ -693,7 +693,7 @@ def test_ksds_exception_key_offset( "dd_name": "MYDD1", "data_set_name": "my.ds", "disposition": "new", - "type": "ESDS", + "type": "esds", "key_offset": 5, } }, From 18486dfee3a4f3705f3a4013637a3751cdf326a8 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:08:02 -0600 Subject: [PATCH 16/28] [Documentation][zos_data_set] Add and standarize docstrings on modules/zos_data_set.py (#1347) * First advance to docstrings on modules/zos_data_set.py * Add and standarize docstrings on modules/zos_data_set.py * Create changelog fragment * Modify google style to numpy * Standarize numpy style * Modified docstrings --------- Co-authored-by: Fernando Flores --- .../1347-update-docstring-zos_data_set.yml | 3 + plugins/modules/zos_data_set.py | 292 ++++++++++++++++-- 2 files changed, 272 insertions(+), 23 deletions(-) create mode 100644 changelogs/fragments/1347-update-docstring-zos_data_set.yml diff --git a/changelogs/fragments/1347-update-docstring-zos_data_set.yml b/changelogs/fragments/1347-update-docstring-zos_data_set.yml new file mode 100644 index 000000000..581ab1aa9 --- /dev/null +++ b/changelogs/fragments/1347-update-docstring-zos_data_set.yml @@ -0,0 
+1,3 @@ +trivial: + - zos_data_set - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1347). \ No newline at end of file diff --git a/plugins/modules/zos_data_set.py b/plugins/modules/zos_data_set.py index 446fd6fe7..b500eb84a 100644 --- a/plugins/modules/zos_data_set.py +++ b/plugins/modules/zos_data_set.py @@ -736,20 +736,27 @@ def get_individual_data_set_parameters(params): """Builds a list of data set parameters to be used in future operations. - Arguments: - params {dict} -- The parameters from + Parameters + ---------- + params : dict + The parameters from Ansible's AnsibleModule object module.params. - Raises: - ValueError: Raised if top-level parameters "name" - and "batch" are both provided. - ValueError: Raised if neither top-level parameters "name" - or "batch" are provided. - - Returns: - [list] -- A list of dicts where each list item + Returns + ------- + Union[dict] + A list of dicts where each list item represents one data set. Each dictionary holds the parameters (passed to the zos_data_set module) for the data set which it represents. + + Raises + ------ + ValueError + Raised if top-level parameters "name" + and "batch" are both provided. + ValueError + Raised if neither top-level parameters "name" + or "batch" are provided. """ if params.get("name") and params.get("batch"): raise ValueError( @@ -769,7 +776,31 @@ def get_individual_data_set_parameters(params): # * can be replaced by built-in def data_set_name(contents, dependencies): """Validates provided data set name(s) are valid. - Returns a list containing the name(s) of data sets.""" + Returns a list containing the name(s) of data sets. + + Parameters + ---------- + contents : str + Name of the dataset. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + None + If the dependencies have a batch. + str + The data set name. 
+ + Raises + ------ + ValueError + Data set name must be provided. + ValueError + Data set and member name must be provided. + ValueError + A value is invalid. + """ if dependencies.get("batch"): return None if contents is None: @@ -807,7 +838,25 @@ def data_set_name(contents, dependencies): # * dependent on state def space_type(contents, dependencies): """Validates provided data set unit of space is valid. - Returns the unit of space.""" + Returns the unit of space. + + Parameters + ---------- + contents : str + Unit of space of the dataset. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + str + The data set unit of space. + + Raises + ------ + ValueError + Value provided is invalid. +""" if dependencies.get("state") == "absent": return "m" if contents is None: @@ -825,7 +874,27 @@ def space_type(contents, dependencies): # * dependent on state def sms_class(contents, dependencies): """Validates provided sms class is of valid length. - Returns the sms class.""" + Returns the sms class. + + Parameters + ---------- + contents : str + Name of the sms class. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + None + If the state is absent or contents is none. + str + The sms class set name. + + Raises + ------ + ValueError + Value is invalid. + """ if dependencies.get("state") == "absent" or contents is None: return None if len(contents) < 1 or len(contents) > 8: @@ -840,7 +909,22 @@ def sms_class(contents, dependencies): def valid_when_state_present(contents, dependencies): """Ensures no arguments that are invalid when state!=present - are allowed.""" + are allowed. + + Parameters + ---------- + contents : str + Arguments to be validated. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + None + If the state is absent or contents is none. + str + Valid arguments. 
+ """ if dependencies.get("state") == "absent" or contents is None: return None return contents @@ -850,7 +934,27 @@ def valid_when_state_present(contents, dependencies): # * dependent on format def record_length(contents, dependencies): """Validates provided record length is valid. - Returns the record length as integer.""" + Returns the record length as integer. + + Parameters + ---------- + contents : str + Length of the dataset. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + None + If the state is absent or contents is none. + str + The data set length. + + Raises + ------ + ValueError + Value is invalid. + """ if dependencies.get("state") == "absent": return None contents = ( @@ -872,7 +976,26 @@ def record_length(contents, dependencies): # * dependent on state # * dependent on record_length def record_format(contents, dependencies): - """Validates data set format is valid.""" + """Validates data set format is valid. + Returns uppercase data set format. + + Parameters + ---------- + contents : str + Format of the dataset. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + str + The data set format in uppercase. Default is 'FB'. + + Raises + ------ + ValueError + Value is invalid. + """ if dependencies.get("state") == "absent": return "fb" if contents is None: @@ -880,7 +1003,7 @@ def record_format(contents, dependencies): formats = "|".join(DATA_SET_FORMATS) if not re.fullmatch(formats, contents, re.IGNORECASE): raise ValueError( - "Value {0} is invalid for format argument. format must be of of the following: {1}.".format( + "Value {0} is invalid for format argument. 
format must be one of the following: {1}.".format( contents, ", ".join(DATA_SET_FORMATS) ) ) @@ -889,8 +1012,27 @@ def record_format(contents, dependencies): # * dependent on state def data_set_type(contents, dependencies): - """Validates data set type is valid.""" - # if dependencies.get("state") == "absent" and contents != "member": + """Validates data set type is valid. + Returns uppercase data set type. + + Parameters + ---------- + contents : str + Type of the dataset. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + str + The data set type in uppercase. Default is PDS. + + Raises + ------ + ValueError + Value is invalid. + """ + # if dependencies.get("state") == "absent" and contents != "MEMBER": # return None if contents is None: return "pds" @@ -907,7 +1049,29 @@ def data_set_type(contents, dependencies): # * dependent on state def volumes(contents, dependencies): """Validates volume is valid. - Returns uppercase volume.""" + Returns uppercase volume. + + Parameters + ---------- + contents : str + Name of the volume. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + None + If the state is absent or contents is none. + str + The volume name. + + Raises + ------ + ValueError + Argument is invalid. + ValueError + Volume is required when state is cataloged. + """ if contents is None: if dependencies.get("state") == "cataloged": raise ValueError("Volume is required when state==cataloged.") @@ -931,7 +1095,31 @@ def volumes(contents, dependencies): # * dependent on type def key_length(contents, dependencies): """Validates data set key length is valid. - Returns data set key length as integer.""" + Returns data set key length as integer. + + Parameters + ---------- + contents : str + key_length. + dependencies : dict + Any dependencies needed for contents argument to be validated. 
+ + Returns + ------- + None + If the state is absent or contents is none. + int + key_length. + + Raises + ------ + ValueError + Argument is invalid. + ValueError + key_length was not provided when requesting KSDS data set. + ValueError + key_length can not be provided when type is not KSDS. + """ if dependencies.get("state") == "absent": return None if dependencies.get("type") == "ksds" and contents is None: @@ -953,7 +1141,31 @@ def key_length(contents, dependencies): # * dependent on key_length def key_offset(contents, dependencies): """Validates data set key offset is valid. - Returns data set key offset as integer.""" + Returns data set key offset as integer. + + Parameters + ---------- + contents : str + Key offset of the data set. + dependencies : dict + Any dependencies needed for contents argument to be validated. + + Returns + ------- + None + If the state is absent or contents is none. + int + Key offset of the data set. + + Raises + ------ + ValueError + Argument is invalid. + ValueError + key_offset was not provided when requesting KSDS data set. + ValueError + key_offset can not be provided when type is not KSDS. + """ if dependencies.get("state") == "absent": return None if dependencies.get("type") == "ksds" and contents is None: @@ -974,7 +1186,22 @@ def key_offset(contents, dependencies): def perform_data_set_operations(name, state, **extra_args): """Calls functions to perform desired operations on - one or more data sets. Returns boolean indicating if changes were made.""" + one or more data sets. Returns boolean indicating if changes were made. + + Parameters + ---------- + name : str + Name of the dataset. + state : str + State of the data sets. + **extra_args : dict + Properties of the data sets. + + Returns + ------- + bool + If changes were made. 
+ """ changed = False # passing in **extra_args forced me to modify the acceptable parameters # for multiple functions in data_set.py including ensure_present, replace @@ -995,6 +1222,18 @@ def perform_data_set_operations(name, state, **extra_args): def parse_and_validate_args(params): + """Parse and validate args. + + Parameters + ---------- + params : dict + Params to validated and parsed. + + Returns + ------- + dict + Parsed args. + """ arg_defs = dict( # Used for batch data set args @@ -1202,6 +1441,13 @@ def parse_and_validate_args(params): def run_module(): + """Runs the module. + + Raises + ------ + fail_json + Any exception during processing of data set params. + """ # TODO: add logic to handle aliases during parsing module_args = dict( From ae2495657f545f1d890390e17b6cd26e962c418b Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:08:29 -0600 Subject: [PATCH 17/28] [Documentation][zos_encode] Add and standarize docstrings on modules/zos_encode.py (#1348) * Add and standarize docstrings on modules/zos_encode.py * Create changelog fragment * Modify google style to numpy * Standarize numpy style * Updated docstrings --------- Co-authored-by: Fernando Flores --- .../1348-update-docstring-zos_encode.yml | 3 + plugins/modules/zos_encode.py | 88 ++++++++++++++++++- 2 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1348-update-docstring-zos_encode.yml diff --git a/changelogs/fragments/1348-update-docstring-zos_encode.yml b/changelogs/fragments/1348-update-docstring-zos_encode.yml new file mode 100644 index 000000000..de9c11c17 --- /dev/null +++ b/changelogs/fragments/1348-update-docstring-zos_encode.yml @@ -0,0 +1,3 @@ +trivial: + - zos_encode - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1348). 
\ No newline at end of file diff --git a/plugins/modules/zos_encode.py b/plugins/modules/zos_encode.py index 1adc08c01..243abb2d9 100644 --- a/plugins/modules/zos_encode.py +++ b/plugins/modules/zos_encode.py @@ -295,6 +295,25 @@ def check_pds_member(ds, mem): + """Check if a member exists in a PDS. + + Parameters + ---------- + ds : str + PDS data set name. + mem : str + Member name to check if is under PDS. + + Returns + ------- + bool + If it is a member of the data set. + + Raises + ------ + EncodeError + Can not find member in provided dataset. + """ check_rc = False if mem in datasets.list_members(ds): check_rc = True @@ -304,7 +323,25 @@ def check_pds_member(ds, mem): def check_mvs_dataset(ds): - """ To call data_set utils to check if the MVS data set exists or not """ + """To call data_set utils to check if the MVS data set exists or not. + + Parameters + ---------- + ds : str + Data set name. + + Returns + ------- + tuple(bool,str) + If the data set exists and it's type. + + Raises + ------ + EncodeError + If data set is not cataloged. + EncodeError + Unable to determine data set type. + """ check_rc = False ds_type = None if not data_set.DataSet.data_set_exists(ds): @@ -321,7 +358,23 @@ def check_mvs_dataset(ds): def check_file(file): - """ check file is a USS file or an MVS data set """ + """Check file is a USS file or an MVS data set. + + Parameters + ---------- + file : str + File to check. + + Returns + ------- + tuple(bool,bool,str) + If is USS file, MVS dataset, and the dataset type. + + Raises + ------ + EncodeError + The data set is not partitioned. + """ is_uss = False is_mvs = False ds_type = None @@ -347,6 +400,18 @@ def check_file(file): def verify_uss_path_exists(file): + """Verify if USS path exists. + + Parameters + ---------- + file : str + Path of the file. + + Raises + ------ + EncodeError + File does not exist in the directory. 
+ """ if not path.exists(file): mypath = "/" + file.split("/")[0] + "/*" ld = listdir(mypath) @@ -359,6 +424,13 @@ def verify_uss_path_exists(file): def run_module(): + """Runs the module. + + Raises + ------ + fail_json + Exception during execution. + """ module_args = dict( src=dict(type="str", required=True), dest=dict(type="str"), @@ -530,6 +602,18 @@ def run_module(): class EncodeError(Exception): def __init__(self, message): + """Error during encoding. + + Parameters + ---------- + message : str + Human readable string describing the exception. + + Attributes + ---------- + msg : str + Human readable string describing the exception. + """ self.msg = 'An error occurred during encoding: "{0}"'.format(message) super(EncodeError, self).__init__(self.msg) From 581fdb277d5add77c40807fb4695c7387ddb0e68 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:09:00 -0600 Subject: [PATCH 18/28] [Documentation][zos_fetch] Add and standarize docstrings on modules/zos_fetch.py (#1349) * Add and standarize docstrings on modules/zos_fetch.py * Create changelog fragment * Modify google style to numpy * Standarize numpy style * Updated docstrings --------- Co-authored-by: Fernando Flores --- .../1349-update-docstring-zos_fetch.yml | 3 + plugins/modules/zos_fetch.py | 159 +++++++++++++++++- 2 files changed, 158 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/1349-update-docstring-zos_fetch.yml diff --git a/changelogs/fragments/1349-update-docstring-zos_fetch.yml b/changelogs/fragments/1349-update-docstring-zos_fetch.yml new file mode 100644 index 000000000..a38504c36 --- /dev/null +++ b/changelogs/fragments/1349-update-docstring-zos_fetch.yml @@ -0,0 +1,3 @@ +trivial: + - zos_fetch - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1349). 
\ No newline at end of file diff --git a/plugins/modules/zos_fetch.py b/plugins/modules/zos_fetch.py index cc26b622b..fda237768 100644 --- a/plugins/modules/zos_fetch.py +++ b/plugins/modules/zos_fetch.py @@ -303,16 +303,50 @@ def __init__(self, module): self.module = module def _fail_json(self, **kwargs): - """ Wrapper for AnsibleModule.fail_json """ + """Wrapper for AnsibleModule.fail_json. + + Parameters + ---------- + **kwargs : dict + Arguments to pass to fail_json(). + """ self.module.fail_json(**kwargs) def _run_command(self, cmd, **kwargs): - """ Wrapper for AnsibleModule.run_command """ + """Wrapper for AnsibleModule.run_command. + + Parameters + ---------- + cmd : str + Command to run. + **kwargs : dict + Arguments to pass to run_command(). + + Returns + ------- + tuple(int,str,str) + Return code, standard output and standard error. + """ return self.module.run_command(cmd, **kwargs) def _get_vsam_size(self, vsam): """Invoke IDCAMS LISTCAT command to get the record length and space used. Then estimate the space used by the VSAM data set. + + Parameters + ---------- + vsam : str + VSAM data set name. + + Returns + ------- + tuple(int,int,int) + Total size, max_recl and rec_total. + + Raises + ------ + fail_json + Unable to obtain data set information. """ space_pri = 0 total_size = 0 @@ -350,7 +384,27 @@ def _get_vsam_size(self, vsam): return total_size, max_recl, rec_total def _copy_vsam_to_temp_data_set(self, ds_name): - """ Copy VSAM data set to a temporary sequential data set """ + """Copy VSAM data set to a temporary sequential data set. + + Parameters + ---------- + ds_name : str + VSAM dataset name to be copied into a temp data set. + + Returns + ------- + str + Temporary dataset name. + + Raises + ------ + fail_json + OS error. + fail_json + cmd error while copying dataset. + fail_json + Failed to call IDCAMS. 
+ """ mvs_rc = 0 vsam_size, max_recl, rec_total = self._get_vsam_size(ds_name) # Default in case of max recl being 80 to avoid failures when fetching and empty vsam. @@ -442,6 +496,25 @@ def _copy_vsam_to_temp_data_set(self, ds_name): def _fetch_uss_file(self, src, is_binary, encoding=None): """Convert encoding of a USS file. Return a tuple of temporary file name containing converted data. + + Parameters + ---------- + src : str + Source of the file. + is_binary : bool + If is binary. + encoding : str + The file encoding. + + Returns + ------- + str + File name with the converted data. + + Raises + ------ + fail_json + Any exception ocurred while converting encoding. """ file_path = None if (not is_binary) and encoding: @@ -471,6 +544,25 @@ def _fetch_uss_file(self, src, is_binary, encoding=None): def _fetch_vsam(self, src, is_binary, encoding=None): """Copy the contents of a VSAM to a sequential data set. Afterwards, copy that data set to a USS file. + + Parameters + ---------- + src : str + Source of the file. + is_binary : bool + If is binary. + encoding : str + The file encoding. + + Returns + ------- + str + USS File containing the encoded content of the input data set. + + Raises + ------ + fail_json + Unable to delete temporary dataset. """ temp_ds = self._copy_vsam_to_temp_data_set(src) file_path = self._fetch_mvs_data(temp_ds, is_binary, encoding) @@ -487,6 +579,27 @@ def _fetch_pdse(self, src, is_binary, encoding=None): """Copy a partitioned data set to a USS directory. If the data set is not being fetched in binary mode, encoding for all members inside the data set will be converted. + + Parameters + ---------- + src : str + Source of the dataset. + is_binary : bool + If is binary. + encoding : str + The file encoding. + + Returns + ------- + str + Directory path containing the files of the converted data set members. + + Raises + ------ + fail_json + Error copying partitioned dataset to USS. + fail_json + Error converting encoding of the member. 
""" dir_path = tempfile.mkdtemp() cmd = "cp -B \"//'{0}'\" {1}" @@ -531,7 +644,28 @@ def _fetch_pdse(self, src, is_binary, encoding=None): def _fetch_mvs_data(self, src, is_binary, encoding=None): """Copy a sequential data set or a partitioned data set member - to a USS file + to a USS file. + + Parameters + ---------- + src : str + Source of the dataset. + is_binary : bool + If is binary. + encoding : str + The file encoding. + + Returns + ------- + str + USS File containing the encoded content of the input data set. + + Raises + ------ + fail_json + Unable to copy to USS. + fail_json + Error converting encoding of the dataset. """ fd, file_path = tempfile.mkstemp() os.close(fd) @@ -571,6 +705,23 @@ def _fetch_mvs_data(self, src, is_binary, encoding=None): def run_module(): + """Runs the module. + + Raises + ------ + fail_json + When parameter verification fails. + fail_json + When the source does not exist or is uncataloged. + fail_json + When it's unable to determine dataset type. + fail_json + While gathering dataset information. + fail_json + When the data set member was not found inside a dataset. + fail_json + When the file does not have appropriate read permissions. 
+ """ # ********************************************************** # # Module initialization # # ********************************************************** # From fdcbf5666c4890aecd9eec4cdeb85038b087fca8 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:09:27 -0600 Subject: [PATCH 19/28] [Documentation][zos_job_query] Add docstrings to modules/zos_job_query.py (#1353) * Add docstrings to modules/zos_job_query.py * Create changelog fragment * Modify google style to numpy * Standarize numpy style --- .../1353-update-docstring-zos_job_query.yml | 3 ++ plugins/modules/zos_job_query.py | 44 ++++++++++++++++++- 2 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1353-update-docstring-zos_job_query.yml diff --git a/changelogs/fragments/1353-update-docstring-zos_job_query.yml b/changelogs/fragments/1353-update-docstring-zos_job_query.yml new file mode 100644 index 000000000..550be9107 --- /dev/null +++ b/changelogs/fragments/1353-update-docstring-zos_job_query.yml @@ -0,0 +1,3 @@ +trivial: + - zos_job_query - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1353). \ No newline at end of file diff --git a/plugins/modules/zos_job_query.py b/plugins/modules/zos_job_query.py index aaa72d9ab..279a3955f 100644 --- a/plugins/modules/zos_job_query.py +++ b/plugins/modules/zos_job_query.py @@ -266,7 +266,15 @@ def run_module(): - + """Initialize the module. + + Raises + ------ + fail_json + Parameter verification failed. + fail_json + Any exception while getting job params. + """ module_args = dict( job_name=dict(type="str", required=False, default="*"), owner=dict(type="str", required=False), @@ -313,7 +321,27 @@ def run_module(): def query_jobs(job_name, job_id, owner): - + """Returns jobs that coincide with the given arguments. + + Parameters + ---------- + job_name : str + Name of the jobs. + job_id : str + Id of the jobs. 
+ owner : str + Owner of the jobs. + + Returns + ------- + Union[str] + List with the jobs. + + Raises + ------ + RuntimeError + No job with was found. + """ jobs = [] if job_id: jobs = job_status(job_id=job_id) @@ -327,6 +355,18 @@ def query_jobs(job_name, job_id, owner): def parsing_jobs(jobs_raw): + """Parse job into an understandable format. + + Parameters + ---------- + jobs_raw : dict + Raw jobs. + + Returns + ------- + dict + Parsed jobs. + """ jobs = [] ret_code = {} for job in jobs_raw: From a5d6c35d113bc142042a7f04151f9df949e5d315 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:11:37 -0600 Subject: [PATCH 20/28] [Documentation][zos_lineinfile] Add and standarize docstrings on modules/zos_lineinfile.py (#1355) * Add and standarize docstrings on modules/zos_lineinfile.py * Create changelog fragment * Modify google style to numpy --- .../1355-update-docstring-zos_lineinfile.yml | 3 + plugins/modules/zos_lineinfile.py | 137 ++++++++++++------ 2 files changed, 98 insertions(+), 42 deletions(-) create mode 100644 changelogs/fragments/1355-update-docstring-zos_lineinfile.yml diff --git a/changelogs/fragments/1355-update-docstring-zos_lineinfile.yml b/changelogs/fragments/1355-update-docstring-zos_lineinfile.yml new file mode 100644 index 000000000..3840b2862 --- /dev/null +++ b/changelogs/fragments/1355-update-docstring-zos_lineinfile.yml @@ -0,0 +1,3 @@ +trivial: + - zos_lineinfile - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1355). 
\ No newline at end of file diff --git a/plugins/modules/zos_lineinfile.py b/plugins/modules/zos_lineinfile.py index a6576af12..43e85061b 100644 --- a/plugins/modules/zos_lineinfile.py +++ b/plugins/modules/zos_lineinfile.py @@ -295,33 +295,45 @@ def present(src, line, regexp, ins_aft, ins_bef, encoding, first_match, backrefs, force): - """Replace a line with the matching regex pattern - Insert a line before/after the matching pattern - Insert a line at BOF/EOF - - Arguments: - src: {str} -- The z/OS USS file or data set to modify. - line: {str} -- The line to insert/replace into the src. - regexp: {str} -- The regular expression to look for in every line of the src. - If regexp matches, ins_aft/ins_bef will be ignored. - ins_aft: {str} -- Insert the line after matching '*regex*' pattern or EOF. - choices: - - EOF - - '*regex*' - ins_bef: {str} -- Insert the line before matching '*regex*' pattern or BOF. - choices: - - BOF - - '*regex*' - encoding: {str} -- Encoding of the src. - first_match: {bool} -- Take the first matching regex pattern. - backrefs: {bool} -- Back reference - force: {bool} -- force for modify a member part of a task in execution - - Returns: - str -- Information in JSON format. keys: - cmd: {str} -- dsed shell command - found: {int} -- Number of matching regex pattern - changed: {bool} -- Indicates if the source was modified. + """Replace a line with the matching regex pattern. + Insert a line before/after the matching pattern. + Insert a line at BOF/EOF. + + Parameters + ---------- + src : str + The z/OS USS file or data set to modify. + line : str + The line to insert/replace into the src. + regexp : str + The regular expression to look for in every line of the src. + If regexp matches, ins_aft/ins_bef will be ignored. + ins_aft : str + Insert the line after matching '*regex*' pattern or EOF. + choices: + - EOF + - '*regex*' + ins_bef : str + Insert the line before matching '*regex*' pattern or BOF. 
+ choices: + - BOF + - '*regex*' + encoding : str + Encoding of the src. + first_match : bool + Take the first matching regex pattern. + backrefs : bool + Back reference. + force : bool + force for modify a member part of a task in execution. + + Returns + ------- + str + Information in JSON format. keys: + cmd {str} -- dsed shell command + found {int} -- Number of matching regex pattern + changed {bool} -- Indicates if the source was modified. """ return datasets.lineinfile( src, @@ -339,26 +351,46 @@ def present(src, line, regexp, ins_aft, ins_bef, encoding, first_match, backrefs def absent(src, line, regexp, encoding, force): - """Delete lines with matching regex pattern - - Arguments: - src: {str} -- The z/OS USS file or data set to modify. - line: {str} -- The line to be deleted in the src. If line matches, - regexp will be ignored. - regexp: {str} -- The regular expression to look for in every line of the src. - encoding: {str} -- Encoding of the src. - force: {bool} -- force for modify a member part of a task in execution - - Returns: - str -- Information in JSON format. keys: - cmd: {str} -- dsed shell command - found: {int} -- Number of matching regex pattern - changed: {bool} -- Indicates if the source was modified. + """Delete lines with matching regex pattern. + + Parameters + ---------- + src : str + The z/OS USS file or data set to modify. + line : str + The line to be deleted in the src. If line matches, + regexp will be ignored. + regexp : str + The regular expression to look for in every line of the src. + encoding : str + Encoding of the src. + force : bool + Force for modify a member part of a task in execution. + + Returns + ------- + str + Information in JSON format. keys: + cmd {str} -- dsed shell command + found {int} -- Number of matching regex pattern + changed {bool} -- Indicates if the source was modified. 
""" return datasets.lineinfile(src, line, regex=regexp, encoding=encoding, state=False, debug=True, force=force) def quotedString(string): + """Add escape if string was quoted. + + Parameters + ---------- + string : str + Given string. + + Returns + ------- + str + The string with the quote marks replaced. + """ # add escape if string was quoted if not isinstance(string, str): return string @@ -366,6 +398,27 @@ def quotedString(string): def main(): + """Initialize the module. + + Raises + ------ + fail_json + Parameter verification failed. + fail_json + regexp is required with backrefs=true. + fail_json + line is required with state=present. + fail_json + One of line or regexp is required with state=absent. + fail_json + Source does not exist. + fail_json + Data set type is NOT supported. + fail_json + Creating backup has failed. + fail_json + dsed return content is NOT in json format. + """ module_args = dict( src=dict( type='str', From 216baa5100fb35ff4f0ebdf11fed29b5c58c3b3b Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:12:04 -0600 Subject: [PATCH 21/28] [Documentation][zos_script] Add and standarize docstrings on modules/zos_script.py (#1390) * Add and standarize docstrings on module-utils/zos_script.py * Add changelog fragment --- .../fragments/1390-update-docstring-zos_script.yml | 3 +++ plugins/modules/zos_script.py | 11 +++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelogs/fragments/1390-update-docstring-zos_script.yml diff --git a/changelogs/fragments/1390-update-docstring-zos_script.yml b/changelogs/fragments/1390-update-docstring-zos_script.yml new file mode 100644 index 000000000..792bf9698 --- /dev/null +++ b/changelogs/fragments/1390-update-docstring-zos_script.yml @@ -0,0 +1,3 @@ +trivial: + - zos_script - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1390). 
\ No newline at end of file diff --git a/plugins/modules/zos_script.py b/plugins/modules/zos_script.py index 0677d187d..e4f93ef21 100644 --- a/plugins/modules/zos_script.py +++ b/plugins/modules/zos_script.py @@ -229,6 +229,17 @@ def run_module(): + """Initialize module. + + Raises + ------ + fail_json + Parameter verification failed. + fail_json + The given chdir does not exist on the system. + fail_json + The script terminated with an error. + """ module = AnsibleModule( argument_spec=dict( chdir=dict(type='str', required=False), From 54ea6baa1ddd00344b8c5c0b62e22f8f4a744857 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:12:21 -0600 Subject: [PATCH 22/28] [Documentation][zos_tso_command] Add and standarize docstrings on modules/zos_tso_command.py (#1391) * Add and standarize docstrings on module-utils/zos_tso_command.py * Add changelog fragment * Modified docstrings --------- Co-authored-by: Fernando Flores Co-authored-by: Rich Parker --- .../1391-update-docstring-zos_tso_command.yml | 3 + plugins/modules/zos_tso_command.py | 65 +++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 changelogs/fragments/1391-update-docstring-zos_tso_command.yml diff --git a/changelogs/fragments/1391-update-docstring-zos_tso_command.yml b/changelogs/fragments/1391-update-docstring-zos_tso_command.yml new file mode 100644 index 000000000..c435799d4 --- /dev/null +++ b/changelogs/fragments/1391-update-docstring-zos_tso_command.yml @@ -0,0 +1,3 @@ +trivial: + - zos_tso_command - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1391). \ No newline at end of file diff --git a/plugins/modules/zos_tso_command.py b/plugins/modules/zos_tso_command.py index 17e190fb2..2ac4a9d32 100644 --- a/plugins/modules/zos_tso_command.py +++ b/plugins/modules/zos_tso_command.py @@ -135,6 +135,23 @@ def run_tso_command(commands, module, max_rc): + """Run tso command. 
+ + Parameters + ---------- + commands : str + Commands to run. + module : AnsibleModule + Ansible module to run the command with. + max_rc : int + Max return code. + + Returns + ------- + Union[dict] + The command result details. + + """ script = """/* REXX */ PARSE ARG cmd address tso @@ -152,6 +169,24 @@ def run_tso_command(commands, module, max_rc): def copy_rexx_and_run_commands(script, commands, module, max_rc): + """Copy rexx into a temporary file and run commands. + + Parameters + ---------- + script : str + Script to run the command. + commands : str + Commands to run. + module : AnsibleModule + Ansible module to run the command with. + max_rc : int + Max return code. + + Returns + ------- + Union[dict] + The command result details. + """ command_detail_json = [] delete_on_close = True tmp_file = NamedTemporaryFile(delete=delete_on_close) @@ -180,6 +215,25 @@ def copy_rexx_and_run_commands(script, commands, module, max_rc): def list_or_str_type(contents, dependencies): + """Checks if a variable contains a string or a list of strings and returns it as a list of strings. + + Parameters + ---------- + contents : str | list[str] + String or list of strings. + dependencies + Unused. + + Returns + ------- + str | Union[str] + The parameter given as a list of strings. + + Raises + ------ + ValueError + Invalid argument type. Expected "string or list of strings". + """ failed = False if isinstance(contents, list): for item in contents: @@ -200,6 +254,17 @@ def list_or_str_type(contents, dependencies): def run_module(): + """Initialize module. + + Raises + ------ + fail_json + ValueError on BetterArgParser. + fail_json + Some command(s) failed. + fail_json + An unexpected error occurred. 
+ """ module_args = dict( commands=dict(type="raw", required=True, aliases=["command"]), max_rc=dict(type="int", required=False, default=0), From ec630df94655042fb5ccb7049c252ebfbd9d746c Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:12:53 -0600 Subject: [PATCH 23/28] [Documentation][zos_volume_init] Add and standarize docstrings on modules/zos_volume_init.py (#1392) * Add and standarize docstrings on module-utils/zos_tso_command.py * Add changelog fragment --- .../fragments/1392-update-docstring-zos_volume_init.yml | 3 +++ plugins/modules/zos_volume_init.py | 6 ++++++ 2 files changed, 9 insertions(+) create mode 100644 changelogs/fragments/1392-update-docstring-zos_volume_init.yml diff --git a/changelogs/fragments/1392-update-docstring-zos_volume_init.yml b/changelogs/fragments/1392-update-docstring-zos_volume_init.yml new file mode 100644 index 000000000..4536f186c --- /dev/null +++ b/changelogs/fragments/1392-update-docstring-zos_volume_init.yml @@ -0,0 +1,3 @@ +trivial: + - zos_volume_init - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1392). \ No newline at end of file diff --git a/plugins/modules/zos_volume_init.py b/plugins/modules/zos_volume_init.py index 6dbc9f97e..0be4f2a8f 100644 --- a/plugins/modules/zos_volume_init.py +++ b/plugins/modules/zos_volume_init.py @@ -230,7 +230,13 @@ def run_module(): + """Initialize the module. + Raises + ------ + fail_json + 'Index' cannot be False for SMS managed volumes. 
+ """ module_args = dict( address=dict(type="str", required=True), verify_volid=dict(type="str", required=False), From 87218eabcc8a4b6ddc28f5026b403e8f933cf878 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:13:16 -0600 Subject: [PATCH 24/28] [Documentation][zos_apf] Add and standarize docstrings on modules/zos_apf.py (#1393) * Add and standarize docstrings on modules/zos_apf.py * Add changelog fragment * Modified docstring --------- Co-authored-by: Fernando Flores --- .../1393-update-docstring-zos_apf.yml | 3 ++ plugins/modules/zos_apf.py | 37 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 changelogs/fragments/1393-update-docstring-zos_apf.yml diff --git a/changelogs/fragments/1393-update-docstring-zos_apf.yml b/changelogs/fragments/1393-update-docstring-zos_apf.yml new file mode 100644 index 000000000..8a89b7aa0 --- /dev/null +++ b/changelogs/fragments/1393-update-docstring-zos_apf.yml @@ -0,0 +1,3 @@ +trivial: + - zos_apf - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1393). \ No newline at end of file diff --git a/plugins/modules/zos_apf.py b/plugins/modules/zos_apf.py index 117801306..664b2e493 100644 --- a/plugins/modules/zos_apf.py +++ b/plugins/modules/zos_apf.py @@ -312,6 +312,30 @@ def backupOper(module, src, backup, tmphlq=None): + """Create a backup for a specified USS file or MVS data set. + + Parameters + ---------- + module : AnsibleModule + src : str + Source USS file or data set to backup. + backup : str + Name for the backup. + tmphlq : str + The name of the temporary high level qualifier to use. + + Returns + ------- + str + Backup name. + + Raises + ------ + fail_json + Data set type is NOT supported. + fail_json + Creating backup has failed. 
+ """ # analysis the file type ds_utils = data_set.DataSetUtils(src) file_type = ds_utils.ds_type() @@ -336,6 +360,19 @@ def backupOper(module, src, backup, tmphlq=None): def main(): + """Initialize the module. + + Raises + ------ + fail_json + Parameter verification failed. + fail_json + Marker length may not exceed 72 characters. + fail_json + library is required. + fail_json + An exception occurred. + """ module = AnsibleModule( argument_spec=dict( library=dict( From 7abaa3618b3cbf9842ec5de347771356e5790c74 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Tue, 16 Apr 2024 10:13:47 -0600 Subject: [PATCH 25/28] [Documentation][zos_operator_action_query] Add and standarize docstrings on modules/zos_operator_action_query.py (#1394) * Add and standarize docstrings on modules/zos_operator_action_query.py * Add changelog fragment * Modified docstrings --------- Co-authored-by: Fernando Flores --- ...te_docstring-zos_operator_action_query.yml | 3 + plugins/modules/zos_operator_action_query.py | 281 +++++++++++++++++- 2 files changed, 272 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/1394-Update_docstring-zos_operator_action_query.yml diff --git a/changelogs/fragments/1394-Update_docstring-zos_operator_action_query.yml b/changelogs/fragments/1394-Update_docstring-zos_operator_action_query.yml new file mode 100644 index 000000000..25c34fd89 --- /dev/null +++ b/changelogs/fragments/1394-Update_docstring-zos_operator_action_query.yml @@ -0,0 +1,3 @@ +trivial: + - zos_operator_action_query - Updated docstrings to numpy style for visual aid to developers. + (https://github.com/ansible-collections/ibm_zos_core/pull/1394). \ No newline at end of file diff --git a/plugins/modules/zos_operator_action_query.py b/plugins/modules/zos_operator_action_query.py index 55cd7cd00..ba6e4ee77 100644 --- a/plugins/modules/zos_operator_action_query.py +++ b/plugins/modules/zos_operator_action_query.py @@ -238,6 +238,15 @@ def run_module(): + """Initialize module. 
+ + Raises + ------ + fail_json + A non-zero return code was received while querying the operator. + fail_json + An unexpected error occurred. + """ module_args = dict( system=dict(type="str", required=False), message_id=dict(type="str", required=False), @@ -317,6 +326,18 @@ def run_module(): def parse_params(params): + """Parse parameters using BetterArgParser. + + Parameters + ---------- + params : dict + Parameters to parse. + + Returns + ------- + dict + Parsed parameters. + """ arg_defs = dict( system=dict(arg_type=system_type, required=False), message_id=dict(arg_type=message_id_type, required=False), @@ -329,24 +350,85 @@ def parse_params(params): def system_type(arg_val, params): + """System type. + + Parameters + ---------- + arg_val : str + Argument to validate. + params : dict + Not used, but obligatory for BetterArgParser. + + Returns + ------- + str + arg_val validated in uppercase. + """ regex = "^(?:[a-zA-Z0-9]{1,8})|(?:[a-zA-Z0-9]{0,7}[*])$" validate_parameters_based_on_regex(arg_val, regex) return arg_val.upper() def message_id_type(arg_val, params): + """Message id type. + + Parameters + ---------- + arg_val : str + Argument to validate. + params : dict + Not used, but obligatory for BetterArgParser. + + Returns + ------- + str + arg_val validated in uppercase. + """ regex = "^(?:[a-zA-Z0-9]{1,})|(?:[a-zA-Z0-9]{0,}[*])$" validate_parameters_based_on_regex(arg_val, regex) return arg_val.upper() def job_name_type(arg_val, params): + """Job name type. + + Parameters + ---------- + arg_val : str + Argument to validate. + params : dict + Not used, but obligatory for BetterArgParser. + + Returns + ------- + str + arg_val validated in uppercase. + """ regex = "^(?:[a-zA-Z0-9]{1,8})|(?:[a-zA-Z0-9]{0,7}[*])$" validate_parameters_based_on_regex(arg_val, regex) return arg_val.upper() def message_filter_type(arg_val, params): + """Message filter type. + + Parameters + ---------- + arg_val : str + Argument to validate. 
+ params : dict + Not used, but obligatory for BetterArgParser. + + Returns + ------- + str + regex of the given argument. + + Raises + ------ + ValidationError + An error occurred during validate the input parameters. + """ try: filter_text = arg_val.get("filter") use_regex = arg_val.get("use_regex") @@ -364,6 +446,25 @@ def message_filter_type(arg_val, params): def validate_parameters_based_on_regex(value, regex): + """Validate parameters based on regex. + + Parameters + ---------- + value : str + Argument to compare to regex pattern. + regex : str + Regex to get pattern from. + + Returns + ------- + str + The value given. + + Raises + ------ + ValidationError + An error occurred during validate the input parameters. + """ pattern = re.compile(regex) if pattern.fullmatch(value): pass @@ -373,7 +474,20 @@ def validate_parameters_based_on_regex(value, regex): def find_required_request(merged_list, params): - """Find the request given the options provided.""" + """Find the request given the options provided. + + Parameters + ---------- + merged_list : list + Merged list to search. + params : dict + Parameters to get for the function. + + Returns + ------- + Union + Filtered list. + """ requests = filter_requests(merged_list, params) return requests @@ -381,9 +495,24 @@ def find_required_request(merged_list, params): def create_merge_list(message_a, message_b, message_filter): """Merge the return lists that execute both 'd r,a,s' and 'd r,a,jn'. For example, if we have: - 'd r,a,s' response like: "742 R MV28 JOB57578 &742 ARC0055A REPLY 'GO'OR 'CANCEL'" + 'd r,a,s' response like: "742 R MV28 JOB57578 &742 ARC0055A REPLY 'GO' OR 'CANCEL'" 'd r,a,jn' response like:"742 R FVFNT29H &742 ARC0055A REPLY 'GO' OR 'CANCEL'" - the results will be merged so that a full list of information returned on condition""" + the results will be merged so that a full list of information returned on condition. 
+ + Parameters + ---------- + message_a : str + Result coming from command 'd r,a,s'. + message_b : str + Result coming from command 'd r,a,jn'. + message_filter : str + Message filter. + + Returns + ------- + Union + Merge of the result of message_a and the result of message_b. + """ list_a = parse_result_a(message_a, message_filter) list_b = parse_result_b(message_b, message_filter) merged_list = merge_list(list_a, list_b) @@ -391,7 +520,20 @@ def create_merge_list(message_a, message_b, message_filter): def filter_requests(merged_list, params): - """filter the request given the params provided.""" + """Filter the request given the params provided. + + Parameters + ---------- + merged_list : list + Merged list to filter. + params : dict + Parameters to get for the function. + + Returns + ------- + Union + Filtered list. + """ system = params.get("system") message_id = params.get("message_id") job_name = params.get("job_name") @@ -406,6 +548,22 @@ def filter_requests(merged_list, params): def handle_conditions(merged_list, condition_type, value): + """Handle conditions. + + Parameters + ---------- + merged_list : list[dict] + List to check. + condition_type : str + Condition type to check. + value + Value to check for. + + Returns + ------- + Union[dict] + The new list. + """ # regex = re.compile(condition_values) newlist = [] exist = False @@ -422,6 +580,24 @@ def handle_conditions(merged_list, condition_type, value): def execute_command(operator_cmd, timeout_s=1, *args, **kwargs): + """Execute operator command. + + Parameters + ---------- + operator_cmd : str + Operator command. + timeout_s : int + Timeout to wait for the command execution, measured in centiseconds. + *args : dict + Arguments for the command. + **kwargs : dict + More arguments for the command. + + Returns + ------- + OperatorQueryResult + The result of the command. 
+ """ # as of ZOAU v1.3.0, timeout is measured in centiseconds, therefore: timeout_c = 100 * timeout_s response = opercmd.execute(operator_cmd, timeout_c, *args, **kwargs) @@ -433,6 +609,20 @@ def execute_command(operator_cmd, timeout_s=1, *args, **kwargs): def match_raw_message(msg, message_filter): + """Match raw message. + + Parameters + ---------- + msg : str + Message to match. + message_filter : str + Filter for the message. + + Return + ------ + bool + If the pattern matches msg. + """ pattern = re.compile(message_filter, re.DOTALL) return pattern.match(msg) @@ -442,7 +632,20 @@ def parse_result_a(result, message_filter): there are usually two formats: - line with job_id: 810 R MV2D JOB58389 &810 ARC0055A REPLY 'GO' OR 'CANCEL' - line without job_id: 574 R MV28 *574 IXG312E OFFLOAD DELAYED FOR.. - also the request contains multiple lines, we need to handle that as well""" + also the request contains multiple lines, we need to handle that as well. + + Parameters + ---------- + result : str + Result coming from command 'd r,a,s'. + message_filter : str + Message filter. + + Returns + ------- + Union[dict[str,str]] + Resulting list. + """ dict_temp = {} list = [] @@ -474,7 +677,20 @@ def parse_result_a(result, message_filter): def parse_result_b(result, message_filter): """Parse the result that comes from command 'd r,a,jn', the main purpose to use this command is to get the job_name and message id, which is not - included in 'd r,a,s'""" + included in 'd r,a,s' + + Parameters + ---------- + result : str + Result coming from command 'd r,a,jn'. + message_filter : str + Message filter. + + Returns + ------- + Union[dict[str,str]] + Resulting list. + """ dict_temp = {} list = [] @@ -506,6 +722,20 @@ def parse_result_b(result, message_filter): def merge_list(list_a, list_b): + """Merge lists. + + Parameters + ---------- + list_a : list + First list to be merged. + list_b : list + Second list to be merged. 
+ + Returns + ------- + Union + Merged of list_a and list_b. + """ merged_list = [] for dict_a in list_a: for dict_b in list_b: @@ -522,6 +752,18 @@ class Error(Exception): class ValidationError(Error): def __init__(self, message): + """An error occurred during validate the input parameters. + + Parameters + ---------- + message : str + Message of the error that ocurred. + + Attributes + ---------- + msg : str + Human readable string describing the exception. + """ self.msg = ( 'An error occurred during validate the input parameters: "{0}"'.format( message @@ -538,12 +780,27 @@ def __init__( ): """Response object class to manage the result from executing a command to query for actionable messages. Class will also generate a message - by concatenating stdout and stderr - - Arguments: - rc {str} -- The return code - stdout {str} -- The standard out of the command run - stderr {str} -- The standard error of the command run + by concatenating stdout and stderr. + + Parameters + ---------- + rc : str + The return code. + stdout : str + The standard out of the command run. + stderr : str + The standard error of the command run. + + Attributes + ---------- + rc : str + The return code. + stdout : str + The standard out of the command run. + stderr : str + The standard error of the command run. + message : str + The standard out of the command run. 
""" self.rc = rc self.stdout = stdout From 3d38011f67f1cf41e5a519f2b18bc4b412d8e911 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= <68956970+AndreMarcel99@users.noreply.github.com> Date: Tue, 16 Apr 2024 10:52:48 -0600 Subject: [PATCH 26/28] [Bugfix][1239][zos job submit]max_rc_more_than_0_doesn_not_put_change_as_true (#1345) * First iteration of solution * Change dataset * Ensure all cases for false * Remove print * Change behavior for bugfix * Add fragment * Fix latest lower case * Fix uppercase * Remove typo * Remove typo * Fix redundance * Fix test and upper cases * Fix test case * Fix fragment * Return to lower case * Return to lower case --------- Co-authored-by: Fernando Flores --- ...1345-max_rc_more_than_0_doesn_not_put_change_as_true.yml | 5 +++++ plugins/modules/zos_job_submit.py | 4 +++- tests/functional/modules/test_zos_fetch_func.py | 5 +++-- tests/functional/modules/test_zos_job_submit_func.py | 6 +++--- 4 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1345-max_rc_more_than_0_doesn_not_put_change_as_true.yml diff --git a/changelogs/fragments/1345-max_rc_more_than_0_doesn_not_put_change_as_true.yml b/changelogs/fragments/1345-max_rc_more_than_0_doesn_not_put_change_as_true.yml new file mode 100644 index 000000000..a09b8fa64 --- /dev/null +++ b/changelogs/fragments/1345-max_rc_more_than_0_doesn_not_put_change_as_true.yml @@ -0,0 +1,5 @@ +bugfixes: + - zos_job_submit - when the argument max_rc was different than 0 the changed response returned + as false. Fix now return a changed response as true when the rc is not 0 and max_rc is above + or equal to the value of the job. + (https://github.com/ansible-collections/ibm_zos_core/pull/1345). 
\ No newline at end of file diff --git a/plugins/modules/zos_job_submit.py b/plugins/modules/zos_job_submit.py index 1b56f459d..204c79217 100644 --- a/plugins/modules/zos_job_submit.py +++ b/plugins/modules/zos_job_submit.py @@ -1108,7 +1108,9 @@ def assert_valid_return_code(max_rc, job_rc, ret_code, result): # should NOT be 'changed=true' even though the user did override the return code, # a non-zero return code means the job did not change anything, so set it as # result["chagned"]=False, - if job_rc != 0: + if max_rc and job_rc > max_rc: + return False + elif job_rc != 0 and max_rc is None: return False return True diff --git a/tests/functional/modules/test_zos_fetch_func.py b/tests/functional/modules/test_zos_fetch_func.py index 5b8e7f878..4d72a6cc5 100644 --- a/tests/functional/modules/test_zos_fetch_func.py +++ b/tests/functional/modules/test_zos_fetch_func.py @@ -539,12 +539,12 @@ def test_fetch_sequential_data_set_replace_on_local_machine(ansible_zos_module): ds_name = TEST_PS hosts.all.zos_data_set(name=TEST_PS, state="present") hosts.all.shell(cmd="decho \"{0}\" \"{1}\"".format(TEST_DATA, TEST_PS)) - dest_path = "/tmp/" + ds_name + dest_path = "/tmp/" + TEST_PS with open(dest_path, "w") as infile: infile.write(DUMMY_DATA) local_checksum = checksum(dest_path, hash_func=sha256) - params = dict(src=ds_name, dest="/tmp/", flat=True) + params = dict(src=TEST_PS, dest="/tmp/", flat=True) try: results = hosts.all.zos_fetch(**params) for result in results.contacted.values(): @@ -562,6 +562,7 @@ def test_fetch_partitioned_data_set_replace_on_local_machine(ansible_zos_module) pds_name = get_tmp_ds_name() dest_path = "/tmp/" + pds_name full_path = dest_path + "/MYDATA" + pds_name_mem = pds_name + "(MYDATA)" hosts.all.zos_data_set( name=pds_name, type="pds", diff --git a/tests/functional/modules/test_zos_job_submit_func.py b/tests/functional/modules/test_zos_job_submit_func.py index f2f1582fa..34fb39d4b 100644 --- 
a/tests/functional/modules/test_zos_job_submit_func.py +++ b/tests/functional/modules/test_zos_job_submit_func.py @@ -713,11 +713,11 @@ def test_job_submit_max_rc(ansible_zos_module, args): assert re.search(r'the submitted job is greater than the value set for option', repr(result.get("msg"))) elif args["max_rc"] == 12: - # Will not fail but changed will be false for the non-zero RC, there - # are other possibilities like an ABEND or JCL ERROR will fail this even + # Will not fail and as the max_rc is set to 12 and the rc is 8 is a change true + # there are other possibilities like an ABEND or JCL ERROR will fail this even # with a MAX RC assert result.get("msg") is None - assert result.get('changed') is False + assert result.get('changed') is True assert result.get("jobs")[0].get("ret_code").get("code") < 12 finally: hosts.all.file(path=tmp_file.name, state="absent") From b198d02c57b7b2a58f74893c2d56118bb0188f28 Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Thu, 18 Apr 2024 12:01:13 -0600 Subject: [PATCH 27/28] [Bug][zos_find] Filter allocated space when using size filter (#1443) * Update zos_archive choices * Update zos_backup_restore choices * Update zos_copy choices * Update zos_data_set choices * Update module docs * Update zos_job_submit choices * Update zos_mount choices * Update zos_unarchive choices * Fix zos_archive and update its tests This also includes major work on zos_data_set since half of the test suite for zos_archive depends on creating data sets. 
* Update zos_backup_restore tests * Update zos_blockinfile tests * Update more modules * Updated more tests * Update zos_unarchive and zos_mount * Update zos_backup_restore unit tests * Corrected size value to use allocated size instead of utilized size * Added size fix * Updated test * Corrected test * Updated docs * Updated changelog * Added test --------- Co-authored-by: Ivan Moreno --- changelogs/fragments/1443-zos_find-filter-size.yml | 4 ++++ plugins/modules/zos_find.py | 3 ++- tests/functional/modules/test_zos_find_func.py | 14 +++++++++----- 3 files changed, 15 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1443-zos_find-filter-size.yml diff --git a/changelogs/fragments/1443-zos_find-filter-size.yml b/changelogs/fragments/1443-zos_find-filter-size.yml new file mode 100644 index 000000000..a5a8ce029 --- /dev/null +++ b/changelogs/fragments/1443-zos_find-filter-size.yml @@ -0,0 +1,4 @@ +bugfixes: + - zos_find - Filter size failed if a PDS/E matched the pattern. Fix now gets the correct size + for PDS/Es. + (https://github.com/ansible-collections/ibm_zos_core/pull/1443). 
\ No newline at end of file diff --git a/plugins/modules/zos_find.py b/plugins/modules/zos_find.py index b49d65f04..a12241458 100644 --- a/plugins/modules/zos_find.py +++ b/plugins/modules/zos_find.py @@ -31,6 +31,7 @@ author: - "Asif Mahmud (@asifmahmud)" - "Demetrios Dimatos (@ddimatos)" + - "Fernando Flores (@fernandofloresg)" options: age: description: @@ -479,7 +480,7 @@ def data_set_attribute_filter( age and not size and _age_filter(ds_age, now, age) ) or ( - size and not age and _size_filter(int(out[5]), size) + size and not age and _size_filter(int(out[6]), size) ) ): filtered_data_sets.add(ds) diff --git a/tests/functional/modules/test_zos_find_func.py b/tests/functional/modules/test_zos_find_func.py index 37a67ddbc..42a8db23e 100644 --- a/tests/functional/modules/test_zos_find_func.py +++ b/tests/functional/modules/test_zos_find_func.py @@ -15,6 +15,7 @@ __metaclass__ = type from ibm_zos_core.tests.helpers.volumes import Volume_Handler +import pytest SEQ_NAMES = [ "TEST.FIND.SEQ.FUNCTEST.FIRST", @@ -32,6 +33,8 @@ "TEST.FIND.VSAM.FUNCTEST.FIRST" ] +DATASET_TYPES = ['seq', 'pds', 'pdse'] + def create_vsam_ksds(ds_name, ansible_zos_module, volume="000000"): hosts = ansible_zos_module @@ -118,7 +121,7 @@ def test_find_pds_members_containing_string(ansible_zos_module): search_string = "hello" try: hosts.all.zos_data_set( - batch=[dict(name=i, type='pds') for i in PDS_NAMES] + batch=[dict(name=i, type='pds', space_primary=1, space_type="m") for i in PDS_NAMES] ) hosts.all.zos_data_set( batch=[ @@ -216,13 +219,14 @@ def test_find_data_sets_older_than_age(ansible_zos_module): assert val.get('matched') == 2 -def test_find_data_sets_larger_than_size(ansible_zos_module): +@pytest.mark.parametrize("ds_type", DATASET_TYPES) +def test_find_data_sets_larger_than_size(ansible_zos_module, ds_type): hosts = ansible_zos_module TEST_PS1 = 'TEST.PS.ONE' TEST_PS2 = 'TEST.PS.TWO' try: - res = hosts.all.zos_data_set(name=TEST_PS1, state="present", space_type="m", 
space_primary=5) - res = hosts.all.zos_data_set(name=TEST_PS2, state="present", space_type="m", space_primary=5) + res = hosts.all.zos_data_set(name=TEST_PS1, state="present", space_primary="1", space_type="m", type=ds_type) + res = hosts.all.zos_data_set(name=TEST_PS2, state="present", space_primary="1", space_type="m", type=ds_type) find_res = hosts.all.zos_find(patterns=['TEST.PS.*'], size="1k") for val in find_res.contacted.values(): assert len(val.get('data_sets')) == 2 @@ -236,7 +240,7 @@ def test_find_data_sets_smaller_than_size(ansible_zos_module): hosts = ansible_zos_module TEST_PS = 'USER.FIND.TEST' try: - hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_type="k", space_primary=1) + hosts.all.zos_data_set(name=TEST_PS, state="present", type="seq", space_primary="1", space_type="k") find_res = hosts.all.zos_find(patterns=['USER.FIND.*'], size='-1m') for val in find_res.contacted.values(): assert len(val.get('data_sets')) == 1 From 9b6b051097836a9d00a73377130f4d5af3f24e34 Mon Sep 17 00:00:00 2001 From: IsaacVRey Date: Thu, 18 Apr 2024 12:03:17 -0600 Subject: [PATCH 28/28] [Documentation][zos_find] Add and standarize docstrings on modules/zos_find.py (#1350) * Add and standarize docstrings on modules/zos_find.py * Create changelog fragment * Modify google style to numpy * Updated docstrings --------- Co-authored-by: Fernando Flores --- .../1350-update-docstring-zos_find.yml | 3 + plugins/modules/zos_find.py | 390 +++++++++++++----- 2 files changed, 297 insertions(+), 96 deletions(-) create mode 100644 changelogs/fragments/1350-update-docstring-zos_find.yml diff --git a/changelogs/fragments/1350-update-docstring-zos_find.yml b/changelogs/fragments/1350-update-docstring-zos_find.yml new file mode 100644 index 000000000..48c1fbce1 --- /dev/null +++ b/changelogs/fragments/1350-update-docstring-zos_find.yml @@ -0,0 +1,3 @@ +trivial: + - zos_find - Updated docstrings to numpy style for visual aid to developers. 
+ (https://github.com/ansible-collections/ibm_zos_core/pull/1350). \ No newline at end of file diff --git a/plugins/modules/zos_find.py b/plugins/modules/zos_find.py index a12241458..b269c472d 100644 --- a/plugins/modules/zos_find.py +++ b/plugins/modules/zos_find.py @@ -277,18 +277,28 @@ def content_filter(module, patterns, content): """ Find data sets that match any pattern in a list of patterns and - contains the given content - - Arguments: - module {AnsibleModule} -- The Ansible module object being used in the module - patterns {list[str]} -- A list of data set patterns - content {str} -- The content string to search for within matched data sets - - Returns: - dict[ps=set, pds=dict[str, str], searched=int] -- A dictionary containing + contains the given content. + + Parameters + ---------- + module : AnsibleModule + The Ansible module object being used in the module. + patterns : list[str] + A list of data set patterns. + content : str + The content string to search for within matched data sets. + + Returns + ------- + dict[ps=set, pds=dict[str, str], searched=int] + A dictionary containing a set of matched "PS" data sets, a dictionary containing "PDS" data sets and members corresponding to each PDS, an int representing number of total data sets examined. + + Raises + ------ + fail_json: Non-zero return code received while executing ZOAU shell command 'dgrep'. """ filtered_data_sets = dict(ps=set(), pds=dict(), searched=0) for pattern in patterns: @@ -321,15 +331,25 @@ def content_filter(module, patterns, content): def data_set_filter(module, pds_paths, patterns): """ Find data sets that match any pattern in a list of patterns. - Arguments: - module {AnsibleModule} -- The Ansible module object being used - patterns {list[str]} -- A list of data set patterns - - Returns: - dict[ps=set, pds=dict[str, str], searched=int] -- A dictionary containing + Parameters + ---------- + module : AnsibleModule + The Ansible module object being used. 
+ patterns : list[str] + A list of data set patterns. + + Returns + ------- + dict[ps=set, pds=dict[str, str], searched=int] + A dictionary containing a set of matched "PS" data sets, a dictionary containing "PDS" data sets and members corresponding to each PDS, an int representing number of total data sets examined. + + Raises + ------ + fail_json + Non-zero return code received while executing ZOAU shell command 'dls'. """ filtered_data_sets = dict(ps=set(), pds=dict(), searched=0) patterns = pds_paths or patterns @@ -372,15 +392,21 @@ def pds_filter(module, pds_dict, member_patterns, excludes=None): """ Return all PDS/PDSE data sets whose members match any of the patterns in the given list of member patterns. - Arguments: - module {AnsibleModule} -- The Ansible module object being used in the module - pds_dict {dict[str, str]} -- A dictionary where each key is the name of - of the PDS/PDSE and the value is a list of - members belonging to the PDS/PDSE - member_patterns {list} -- A list of member patterns to search for - - Returns: - dict[str, set[str]] -- Filtered PDS/PDSE with corresponding members + Parameters + ---------- + module : AnsibleModule + The Ansible module object being used in the module. + pds_dict : dict[str, str] + A dictionary where each key is the name of + of the PDS/PDSE and the value is a list of + members belonging to the PDS/PDSE. + member_patterns : list + A list of member patterns to search for. + + Returns + ------- + dict[str, set[str]] + Filtered PDS/PDSE with corresponding members. """ filtered_pds = dict() for pds, members in pds_dict.items(): @@ -412,12 +438,22 @@ def vsam_filter(module, patterns, resource_type, age=None): """ Return all VSAM data sets that match any of the patterns in the given list of patterns. 
- Arguments: - module {AnsibleModule} -- The Ansible module object being used - patterns {list[str]} -- A list of data set patterns - - Returns: - set[str]-- Matched VSAM data sets + Parameters + ---------- + module : AnsibleModule + The Ansible module object being used. + patterns : list[str] + A list of data set patterns. + + Returns + ------- + set[str] + Matched VSAM data sets. + + Raises + ------ + fail_json + Non-zero return code received while executing ZOAU shell command 'vls'. """ filtered_data_sets = set() now = time.time() @@ -447,14 +483,26 @@ def data_set_attribute_filter( ): """ Filter data sets based on attributes such as age or size. - Arguments: - module {AnsibleModule} -- The Ansible module object being used - data_sets {set[str]} -- A set of data set names - size {int} -- The size, in bytes, that should be used to filter data sets - age {int} -- The age, in days, that should be used to filter data sets - - Returns: - set[str] -- Matched data sets filtered by age and size + Parameters + ---------- + module : AnsibleModule + The Ansible module object being used. + data_sets : set[str] + A set of data set names. + size : int + The size, in bytes, that should be used to filter data sets. + age : int + The age, in days, that should be used to filter data sets. + + Returns + ------- + set[str] + Matched data sets filtered by age and size. + + Raises + ------ + fail_json + Non-zero return code received while executing ZOAU shell command 'dls'. """ filtered_data_sets = set() now = time.time() @@ -494,13 +542,24 @@ def volume_filter(module, data_sets, volumes): """Return only the data sets that are allocated in one of the volumes from the list of input volumes. 
- Arguments: - module {AnsibleModule} -- The Ansible module object - data_sets {set[str]} -- A set of data sets to be filtered - volumes {list[str]} -- A list of input volumes - - Returns: - set[str] -- The filtered data sets + Parameters + ---------- + module : AnsibleModule + The Ansible module object. + data_sets : set[str] + A set of data sets to be filtered. + volumes : list[str] + A list of input volumes. + + Returns + ------- + set[str] + The filtered data sets. + + Raises + ------ + fail_json + Unable to retrieve VTOC information. """ filtered_data_sets = set() for volume in volumes: @@ -518,15 +577,21 @@ def volume_filter(module, data_sets, volumes): def exclude_data_sets(module, data_set_list, excludes): - """Remove data sets that match any pattern in a list of patterns - - Arguments: - module {AnsibleModule} -- The Ansible module object being used - data_set_list {set[str]} -- A set of data sets to be filtered - excludes {list[str]} -- A list of data set patterns to be excluded - - Returns: - set[str] -- The remaining data sets that have not been excluded + """Remove data sets that match any pattern in a list of patterns. + + Parameters + ---------- + module : AnsibleModule + The Ansible module object being used. + data_set_list : set[str] + A set of data sets to be filtered. + excludes : list[str] + A list of data set patterns to be excluded. + + Returns + ------- + set[str] + The remaining data sets that have not been excluded. """ for ds in set(data_set_list): for ex_pat in excludes: @@ -537,15 +602,21 @@ def exclude_data_sets(module, data_set_list, excludes): def _age_filter(ds_date, now, age): - """Determine whether a given date is older than 'age' - - Arguments: - ds_date {str} -- The input date in the format YYYY/MM/DD - now {float} -- The time elapsed since the last epoch - age {int} -- The age, in days, to compare against - - Returns: - bool -- Whether 'ds_date' is older than 'age' + """Determine whether a given date is older than 'age'. 
+ + Parameters + ---------- + ds_date : str + The input date in the format YYYY/MM/DD. + now : float + The time elapsed since the last epoch. + age : int + The age, in days, to compare against. + + Returns + ------- + bool + Whether 'ds_date' is older than 'age'. """ year, month, day = list(map(int, ds_date.split("/"))) if year == "0000": @@ -561,14 +632,24 @@ def _age_filter(ds_date, now, age): def _get_creation_date(module, ds): - """Retrieve the creation date for a given data set - - Arguments: - module {AnsibleModule} -- The Ansible module object being used - ds {str} -- The name of the data set - - Returns: - str -- The data set creation date in the format "YYYY/MM/DD" + """Retrieve the creation date for a given data set. + + Arguments + --------- + module : AnsibleModule + The Ansible module object being used. + ds : str + The name of the data set. + + Returns + ------- + str + The data set creation date in the format "YYYY/MM/DD". + + Raises + ------ + fail_json + Non-zero return code received while retrieving data set age. """ rc, out, err = mvs_cmd.idcams( " LISTCAT ENT('{0}') HISTORY".format(ds), authorized=True @@ -596,14 +677,19 @@ def _get_creation_date(module, ds): def _size_filter(ds_size, size): - """ Determine whether a given size is greater than the input size - - Arguments: - ds_size {int} -- The input size, in bytes - size {int} -- The size, in bytes, to compare against - - Returns: - bool -- Whether 'ds_size' is greater than 'age' + """Determine whether a given size is greater than the input size. + + Parameters + ---------- + ds_size : int + The input size, in bytes. + size : int + The size, in bytes, to compare against. + + Returns + ------- + bool + Whether 'ds_size' is greater than 'age'. 
""" if size >= 0 and ds_size >= abs(size): return True @@ -613,15 +699,26 @@ def _size_filter(ds_size, size): def _match_regex(module, pattern, string): - """ Determine whether the input regex pattern matches the string - - Arguments: - module {AnsibleModule} -- The Ansible module object being used - pattern {str} -- The regular expression to match - string {str} -- The string to match - - Returns: - re.Match -- A Match object that matches the pattern to string + """Determine whether the input regex pattern matches the string. + + Parameters + ---------- + module : AnsibleModule + The Ansible module object being used. + pattern : str + The regular expression to match. + string : str + The string to match. + + Returns + ------- + re.Match + A Match object that matches the pattern to string. + + Raises + ------ + fail_json + Invalid regular expression. """ try: return fullmatch(pattern, string, re.IGNORECASE) @@ -640,7 +737,28 @@ def _dgrep_wrapper( verbose=False, context=None ): - """A wrapper for ZOAU 'dgrep' shell command""" + """A wrapper for ZOAU 'dgrep' shell command. + + Parameters + ---------- + data_set_pattern : str + Data set pattern where to search for content. + content : str + Content to search across the data sets specified in data_set_pattern. + ignore_case : bool + Whether to ignore case or not. + line_num : bool + Whether to display line numbers. + verbose : bool + Extra verbosity, prints names of datasets being searched. + context : int + If context lines are requested, then up to lines before and after the matching line are also printed. + + Returns + ------- + tuple(int,str,str) + Return code, standard output and standard error. + """ dgrep_cmd = "dgrep" if ignore_case: dgrep_cmd += " -i" @@ -663,7 +781,28 @@ def _dls_wrapper( verbose=False, migrated=False ): - """A wrapper for ZOAU 'dls' shell command""" + """A wrapper for ZOAU 'dls' shell command. + + Parameters + ---------- + data_set_pattern : str + Data set pattern. 
+ list_details : bool + Display detailed information based on the dataset type. + u_time : bool + Display last usage time. + size : bool + Display size in list. + verbose : bool + Display verbose information. + migrated : bool + Display migrated data sets. + + Returns + ------- + tuple(int,str,str) + Return code, standard output and standard error. + """ dls_cmd = "dls" if migrated: dls_cmd += " -m" @@ -682,7 +821,22 @@ def _dls_wrapper( def _vls_wrapper(pattern, details=False, verbose=False): - """A wrapper for ZOAU 'vls' shell command""" + """A wrapper for ZOAU 'vls' shell command. + + Parameters + ---------- + pattern : str + Data set pattern. + details : bool + Display detailed information based on the dataset type. + verbose : bool + Display verbose information. + + Returns + ------- + tuple(int,str,str) + Return code, standard output and standard error. + """ vls_cmd = "vls" if details: vls_cmd += " -l" @@ -694,6 +848,20 @@ def _vls_wrapper(pattern, details=False, verbose=False): def _match_resource_type(type1, type2): + """Compare that the two types match. + + Parameters + ---------- + type1 : str + One of the types that are expected to match. + type2 : str + One of the types that are expected to match. + + Returns + ------- + bool + If the types match. + """ if type1 == type2: return True if type1 == "CLUSTER" and type2 not in ("DATA", "INDEX"): @@ -702,13 +870,17 @@ def _match_resource_type(type1, type2): def _ds_type(ds_name): - """Utility function to determine the DSORG of a data set + """Utility function to determine the DSORG of a data set. - Arguments: - ds_name {str} -- The name of the data set + Parameters + ---------- + ds_name : str + The name of the data set. - Returns: - str -- The DSORG of the data set + Returns + ------- + str + The DSORG of the data set. """ rc, out, err = mvs_cmd.ikjeft01( " LISTDS '{0}'".format(ds_name), @@ -721,6 +893,25 @@ def _ds_type(ds_name): def run_module(module): + """Initialize parameters. 
+ + Parameters + ---------- + module : AnsibleModule + Ansible Module. + + Returns + ------- + dict + Arguments. + + Raises + ------ + fail_json + Failed to process age. + fail_json + Failed to process size. + """ # Parameter initialization age = module.params.get('age') age_stamp = module.params.get('age_stamp') @@ -817,6 +1008,13 @@ def run_module(module): def main(): + """Initialize module when it's run as main. + + Raises + ------ + fail_json + Parameter verification failed. + """ module = AnsibleModule( argument_spec=dict( age=dict(type="str", required=False),