diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 9efc1ea61..d23ceb7ed 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -4,6 +4,51 @@ ibm.ibm\_zos\_core Release Notes
 .. contents:: Topics
 
+v1.11.0-beta.1
+==============
+
+Release Summary
+---------------
+
+Release Date: '2024-08-05'
+This changelog describes all changes made to the modules and plugins included
+in this collection. The release date is the date the changelog is created.
+For additional details such as required dependencies and availability review
+the collections `release notes `__
+
+Minor Changes
+-------------
+
+- zos_apf - Change input to auto-escape 'library' names containing symbols (https://github.com/ansible-collections/ibm_zos_core/pull/1493).
+- zos_archive - Added support for GDG and GDS relative name notation to archive data sets. Added support for data set names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1511).
+- zos_backup_restore - Added support for GDS relative name notation to include or exclude data sets when operation is backup. Added support for data set names with special characters like $, /#, and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1527).
+- zos_blockinfile - Added support for GDG and GDS relative name notation to use a data set, and to create backups as new generations. Added support for data set names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1516).
+- zos_copy - add support for copying generation data sets (GDS) and generation data groups (GDG), as well as using a GDS for backup. (https://github.com/ansible-collections/ibm_zos_core/pull/1564).
+- zos_data_set - Added support for GDG and GDS relative name notation to create, delete, catalog and uncatalog a data set. Added support for data set names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1504).
+- zos_encode - add support for encoding generation data sets (GDS), as well as using a GDS for backup. (https://github.com/ansible-collections/ibm_zos_core/pull/1531).
+- zos_fetch - add support for fetching generation data groups and generation data sets. (https://github.com/ansible-collections/ibm_zos_core/pull/1519)
+- zos_find - added support for GDG/GDS and special characters (https://github.com/ansible-collections/ibm_zos_core/pull/1518).
+- zos_job_submit - Improved the copy to remote mechanism to avoid using deepcopy that could result in failure for some systems. (https://github.com/ansible-collections/ibm_zos_core/pull/1561).
+- zos_job_submit - add support for generation data groups and generation data sets as sources for jobs. (https://github.com/ansible-collections/ibm_zos_core/pull/1497)
+- zos_lineinfile - Added support for GDG and GDS relative name notation to use a data set, and to create backups as new generations. Added support for data set names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1516).
+- zos_mount - Added support for data set names with special characters ($, /#, /- and @). This is for both src and backup data set names. (https://github.com/ansible-collections/ibm_zos_core/pull/1631).
+- zos_tso_command - Added support for GDG and GDS relative name notation to use a data set name. Added support for data set names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1563).
+- zos_mvs_raw - Added support for GDG and GDS relative name notation to use a data set. Added support for data set names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1525).
+- zos_mvs_raw - Added support for GDG and GDS relative positive name notation to use a data set. (https://github.com/ansible-collections/ibm_zos_core/pull/1541).
+- zos_mvs_raw - Redesigned the dd class wrappers to use their arguments properly. (https://github.com/ansible-collections/ibm_zos_core/pull/1470).
+- zos_script - Improved the copy to remote mechanism to avoid using deepcopy that could result in failure for some systems. (https://github.com/ansible-collections/ibm_zos_core/pull/1561).
+- zos_unarchive - Added support for data set names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1511).
+- zos_unarchive - Improved the copy to remote mechanism to avoid using deepcopy that could result in failure for some systems. (https://github.com/ansible-collections/ibm_zos_core/pull/1561).
+
+Bugfixes
+--------
+
+- module_util/data_set.py - DataSet.data_set_cataloged function previously only returned True or False, but failed to account for exceptions which occurred during the LISTCAT. The fix now raises an MVSCmdExecError if the return code from LISTCAT is too high. (https://github.com/ansible-collections/ibm_zos_core/pull/1535).
+- zos_copy - a regression in version 1.4.0 made the module stop automatically computing member names when copying a single file into a PDS/E. Fix now lets a user copy a single file into a PDS/E without adding a member in the dest option. (https://github.com/ansible-collections/ibm_zos_core/pull/1570).
+- zos_copy - module would use opercmd to check if a nonexistent destination data set is locked. Fix now only checks if the destination is already present. (https://github.com/ansible-collections/ibm_zos_core/pull/1623).
+- zos_job_submit - Was not propagating error types UnicodeDecodeError, JSONDecodeError, TypeError and KeyError when encountered; now the error message includes the error type. (https://github.com/ansible-collections/ibm_zos_core/pull/1560).
+- zos_mvs_raw - The first character of each line in DD_output was missing. The fix now includes the first character of each line. (https://github.com/ansible-collections/ibm_zos_core/pull/1543).
+
 v1.10.0
 =======
 
diff --git a/README.md b/README.md
index 629ce15b4..e0d274bad 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@ To upgrade the collection to the latest available version, run the following com
 ansible-galaxy collection install ibm.ibm_zos_core --upgrade
 ```
 
-
You can also install a specific version of the collection, for example, if you need to downgrade for some reason. Use the following syntax to install version 1.0.0: +
You can also install a specific version of the collection, for example, if you need to install a different version. Use the following syntax to install version 1.0.0: ```sh ansible-galaxy collection install ibm.ibm_zos_core:1.0.0 @@ -123,7 +123,7 @@ environment_vars: ## Testing -All releases, will meet the following test criteria. +All releases will meet the following test criteria. * 100% success for [Functional](https://github.com/ansible-collections/ibm_zos_core/tree/dev/tests/functional) tests. * 100% success for [Unit](https://github.com/ansible-collections/ibm_zos_core/tree/dev/tests/unit) tests. @@ -134,9 +134,9 @@ All releases, will meet the following test criteria.
This release of the collection was tested with the following dependencies.
 
 * ansible-core v2.15.x
-* Python 3.9.x
+* Python 3.11.x
 * IBM Open Enterprise SDK for Python 3.11.x
-* IBM Z Open Automation Utilities (ZOAU) 1.3.0.x
+* IBM Z Open Automation Utilities (ZOAU) 1.3.1.x
 * z/OS V2R5
 
 This release introduces case sensitivity for option values and includes a porting guide in the [release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html) to assist with which option values will need to be updated.
@@ -177,9 +177,10 @@ For Galaxy and GitHub users, to see the supported ansible-core versions, review
 
 | Version | Status | Release notes | Changelogs |
 |----------|----------------|---------------|------------|
-| 1.11.x | In development | unreleased | unreleased |
+| 1.12.x | In development | unreleased | unreleased |
+| 1.11.x | In preview | [Release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html#version-1-11-0-beta.1) | [Changelogs](https://github.com/ansible-collections/ibm_zos_core/blob/v1.11.0-beta.1/CHANGELOG.rst) |
 | 1.10.x | Current | [Release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html#version-1-10-0) | [Changelogs](https://github.com/ansible-collections/ibm_zos_core/blob/v1.10.0/CHANGELOG.rst) |
-| 1.9.x | Released | [Release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html#version-1-9-0) | [Changelogs](https://github.com/ansible-collections/ibm_zos_core/blob/v1.9.0/CHANGELOG.rst) |
+| 1.9.x | Released | [Release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html#version-1-9-2) | [Changelogs](https://github.com/ansible-collections/ibm_zos_core/blob/v1.9.2/CHANGELOG.rst) |
 | 1.8.x | Released | [Release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html#version-1-8-0) | [Changelogs](https://github.com/ansible-collections/ibm_zos_core/blob/v1.8.0/CHANGELOG.rst) |
 | 1.7.x | Released | [Release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html#version-1-7-0) | [Changelogs](https://github.com/ansible-collections/ibm_zos_core/blob/v1.7.0/CHANGELOG.rst) |
 | 1.6.x | Released | [Release notes](https://ibm.github.io/z_ansible_collections_doc/ibm_zos_core/docs/source/release_notes.html#version-1-6-0) | [Changelogs](https://github.com/ansible-collections/ibm_zos_core/blob/v1.6.0/CHANGELOG.rst) |
diff --git a/ac b/ac
index 016b760ea..14fa159d4 100755
--- a/ac
+++ b/ac
@@ -28,7 +28,7 @@ VENV_HOME_MANAGED=${PWD%/venv}/venv
 
-# Lest normalize the version from 3.10.2 to 3010002000
+# Normalize the version from 3.10.2 to 3010002000
 # Do we need that 4th octet?
 normalize_version() {
 echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }';
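To make the intent of `normalize_version` concrete: each octet after the first is zero-padded to three digits, so version strings can be compared as plain integers. A minimal sketch of the expected behavior (outputs derived from the awk format string above, not from running the patch):

```sh
normalize_version "3.10.2"   # prints 3010002000 (an absent 4th octet becomes 000)
normalize_version "1.3.1"    # prints 1003001000
# This is why the script can compare ZOAU levels numerically, e.g.:
[ "$(normalize_version "1.3.1")" -lt 1003000000 ] && echo "pre-1.3.0" || echo "1.3.0 or later"
```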
@@ -103,15 +103,32 @@ DOCKER_INFO=`podman info> /dev/null 2>&1;echo $?`
 # ==============================================================================
 # Arg parsing helpers
 # ==============================================================================
-terminate() {
- printf '%s\n' "$1" >&2
+
+# ------------------------------------------------------------------------------
+# This method generates an INFO message with green color and dividers. This
+# message will always be sent to STDERR so that STDOUT can be reserved for
+# return codes. Use this method for messages to the console.
+# ------------------------------------------------------------------------------
+message(){
+ printf '%s\n' "${GRN}${DIV}${ENDC}" >&2
+ printf '%s\n' "${GRN}INFO:${ENDC} ${1}" >&2
+ printf '%s\n' "${GRN}${DIV}${ENDC}" >&2
+}
+
+# ------------------------------------------------------------------------------
+# This method generates an ERROR message with red color. This message
+# will always be sent to STDERR so that STDOUT can be reserved for return codes.
+# Use this method for error messages to the console.
+# ------------------------------------------------------------------------------
+message_error(){
+ ERROR_MSG="${RED}ERROR${ENDC}: $1"
+ printf '%s\n' "${ERROR_MSG}" >&2
 exit 1
 }
-message(){
- echo $DIV;
- echo "$1";
- echo $DIV;
+message_warn(){
+ WARN_MSG="${YEL}WARN${ENDC}: $1"
+ printf '%s\n' "${WARN_MSG}" >&2
 }
 
 ensure_managed_venv_exists(){
@@ -123,6 +140,11 @@ ensure_managed_venv_exists(){
 fi
 }
 
+terminate() {
+ printf '%s\n' "$1" >&2
+ exit 1
+}
+
 # ------------------------------------------------------------------------------
 # Generate simple formatted but incomplete help
 # ------------------------------------------------------------------------------
@@ -187,6 +209,15 @@ help(){
 helpMessage=" "substr($0, 3); \
 print helpMessage
 }
+ } else if ($0 ~ /^##[[:space:]][[:space:]]*\$[[:space:]]*--/) { \
+ helpMessage = substr($0, 3); \
+ if (helpCommand && helpMessage) {\
+ printf "\033[36m%-16s\033[0m %s\n", helpCommand, helpMessage; \
+ helpCommand =""; \
+ } else {\
+ helpMessage=" "substr($0, 6); \
+ print helpMessage
+ }
 }
 }' $0
 fi
@@ -200,7 +231,7 @@ option_processor(){
 opt=$1
 arg=$2
 if [ "$arg" ]; then
- echo $arg
+ echo "$arg"
 elif [ "$opt" ]; then
 # Split up to "=" and set the remainder
 value=${opt#*=}
@@ -417,10 +448,12 @@ ac_sanity(){
 ## defaults to all tests in file running.
 ## debug - enable debug for pytest (-s), choices are true and false
 ## Example:
-## $ ac --ac-test --host ec01150a --python 3.10 --zoau 1.2.2 --file tests/functional/modules/test_zos_operator_func.py --test test_zos_operator_positive_path --debug true
+## $ ac --ac-test --host ec01150a --python 3.10 --zoau 1.2.2\
+## $ --file tests/functional/modules/test_zos_operator_func.py --test test_zos_operator_positive_path --debug true
 ## $ ac --ac-test --host ec33012a --python 3.10 --zoau 1.2.2 --file tests/functional/modules/test_zos_operator_func.py --debug true
 ## $ ac --ac-test --file tests/functional/modules/test_zos_operator_func.py --debug true
 ## $ ac --ac-test
+## $ ac --ac-test --host ec01130a --python 3.10 --zoau 1.3.1 --file invalid/test/returns/rc/of/4/to/stderr 2>>/dev/null
 ac_test(){
 host=$1
 python=$2
@@ -449,23 +482,390 @@ ac_test(){
 exit 1
 fi
 
- #cd ${VENV_BIN}
-
 if [ "$file" ]; then
- . ${VENV_BIN}/activate && ${VENV_BIN}/pytest --ignore="${skip}" $CURR_DIR/${file} --host-pattern=all --zinventory=${VENV}/config.yml "${debug}"
+ . ${VENV_BIN}/activate && ${VENV_BIN}/pytest $CURR_DIR/${file} --ignore="${skip}" --host-pattern=all --zinventory=${VENV}/config.yml ${debug} >&2 ; echo $? >&1
 else
 for file in `ls tests/functional/modules/*.py`; do
- # For some reason '--ignore not being honored so injecting a work around
 if [ "$file" != "$skip" ]; then
- . ${VENV_BIN}/activate && ${VENV_BIN}/pytest --ignore="${skip}" $CURR_DIR/${file} --host-pattern=all --zinventory=${VENV}/config.yml "${debug}"
+ . ${VENV_BIN}/activate && ${VENV_BIN}/pytest $CURR_DIR/${file} --ignore="${skip}" --host-pattern=all --zinventory=${VENV}/config.yml ${debug} >&2 ; echo $? >&1
 fi
 done
 fi
 
 # Clean up the collections folder after running the tests, temporary work around.
 rm -rf collections/ansible_collections
+}
 
- #cd ${CURR_DIR}
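With this change, `ac_test` writes pytest output to stderr and echoes only the pytest return code to stdout, so a caller can capture the code directly. A hedged sketch reusing host/version values from the help examples above (placeholders for real lab systems):

```sh
rc=$(./ac --ac-test --host ec33012a --python 3.10 --zoau 1.2.2 \
      --file tests/functional/modules/test_zos_operator_func.py --debug true 2>/dev/null)
echo "pytest exit code: ${rc}"   # 0 on success, pytest's return code otherwise
```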
+# ------------------------------------------------------------------------------
+# Run concurrent executor:
+# ------------------------------------------------------------------------------
+#->test-concurrent:
+## Run the concurrent executor (CE) that can drive test cases to a cluster of hosts.
+## Usage: ac --test-concurrent [--host ] [--user ] --python [--zoau ] [--pythonpath ]
+## [--volumes ] [--file ] [--skip ] [--itr ] [--replay ]
+## [--timeout ] [--throttle ] [--workers ] [--maxjob ] [--maxnode ]
+## [--bal ] [--verbose ] [--verbosity ] [--debug ] [--extra ]
+## Options:
+## host (optional):
+## - Space or comma delimited managed nodes to use.
+## - Entering one or more managed nodes overrides the auto detection feature which
+## will build a cluster of managed nodes to run on.
+## - Only the host prefix is needed, e.g. 'ec01150a'
+## user (optional):
+## - Ansible user authorized to run tests on the managed node.
+## python (required):
+## - IBM enterprise python version, e.g. '3.10', '3.11', '3.12'
+## zoau (optional):
+## - ZOAU version to use, e.g. 1.2.5, 1.3.0, 1.3.1
+## pythonpath (optional):
+## - The absolute path to where the ZOAU python module is located.
+## - This can be the precompiled binary, wheels or setup tools installation home.
+## - Default is to use the precompiled binary (until we establish wheel locations)
+## volumes (optional):
+## - The volumes to use with the test cases, overrides the auto volume assignment.
+## - Defaults to "222222,000000"
+## file (optional):
+## - Space or comma delimited test suites that should be included in the result.
+## - A test suite is a collection of test cases in a file that starts with
+## 'test' and ends in '.py'.
+## - Do not include the absolute path, this is automatically determined.
+## - For all functional tests, use the `functional/*` notation.
+## - For all unit tests, use the `unit/*` notation for directories.
+## - Default is all functional and unit tests.
+## - A directory of test cases is such that it contains test suites.
+## skip (optional):
+## - Space or comma delimited test suites that should not be included
+## in the result.
+## - Supply only the test suite name, the tooling will prepend the
+## necessary path.
+## - Default is to skip 'test_module_security.py'; this cannot be removed but
+## it can be replaced with another test or tests.
+## itr (optional):
+## - Configure the number of iterations to rerun failed test cases.
+## - Each iteration will run only the prior iteration's failed tests until
+## either there are no more iterations left or there are no more failed
+## tests to run.
+## - Default is 50 so that full regression can succeed.
+## replay (optional):
+## - Instruct the CE to replay the entire command with all provided options
+## for only the failed tests.
+## - The idea behind this is if you did not set enough iterations, rather than
+## start all over you could instruct CE to rerun with the failed test cases
+## it has recorded, giving a higher probability there will be success.
+## - Each replay will run only the prior iteration's failed tests until
+## either there are no more replays left or there are no more failed
+## tests to run.
+## - Default is 5, so that full regression can succeed.
+## timeout (optional):
+## - The maximum time in seconds a job should wait for completion.
+## - When set, a subprocess call executing pytest will wait this amount of time.
+## - Default is 300 seconds (5 minutes).
+## throttle (optional):
+## - Configuration throttles the managed node test execution such that a node will
+## only run one job at a time, no matter the threads.
+## - If disabled (False), concurrency will increase, but has the risk of encountering
+## increased ansible connection failures; while this could result in shorter regression
+## it could also result in longer times because of failed connections.
+## - Default is True, managed nodes will only execute one test at a time.
+## workers (optional):
+## - The numerical multiplier used to increase the number of worker threads.
+## - This value is multiplied by the number of managed nodes to calculate the
+## number of threads to start the CE thread pool with.
+## - Default is 1, so CE will have 1 thread for each managed node.
+## - Any value greater than 1 will automatically disable throttle.
+## - At this time, setting more threads could result in connection failures, see throttle.
+## maxjob (optional):
+## - The maximum number of times a test case can fail before it is removed from the job queue.
+## - This is helpful in identifying a bug, possibly in a test case or module.
+## - Setting this value sets an upper bound limit on how many times a test case is permitted
+## to fail.
+## - Default is 10, such that the test will no longer be permitted to execute after 10.
+## maxnode (optional):
+## - The maximum number of tests that can fail on a managed node before the node is removed
+## from the node queue.
+## - This is helpful in identifying a problematic managed node such that it may require an IPL.
+## - Default is 30, such that the managed node will no longer be permitted to run tests after 30.
+## - After the default is exceeded, the managed node is set to OFFLINE status.
+## bal (optional):
+## - The maximum number of times a test is permitted to fail on a given managed node
+## before being assigned to a new managed node.
+## - This is helpful in identifying test cases that may be experiencing managed node latency;
+## this allows CE to assign the test case to a new, less active managed node so that it might
+## have a higher chance of success.
+## - Default is 10; after a test case fails 10 times on a node it will be assigned to a new managed node.
+## verbose (optional):
+## - Instruct CE to run with verbose stdout to the console.
+## - This will instruct CE to write all statistics to stdout.
+## - Default is 'False', no verbosity to the console.
+## - Statistics are always written to directory '/tmp' as text and HTML files.
+## - Files in '/tmp' will follow this name pattern, e.g. concurrent-executor-log---.
+## - examples are:
+## - concurrent-executor-log-00:21:24.txt
+## - concurrent-executor-log-replay-1-failure-00:21:24.html
+## - concurrent-executor-tests-replay-1-success-00:21:24.html
+## verbosity (optional):
+## - Configure pytest verbosity level.
+## - Integer value corresponds to verbosity level.
+## - 1 = -v, 2 = -vv, 3 = -vvv, 4 = -vvvv
+## - Default is 0, no verbosity.
+## debug (optional):
+## - Instruct Pytest whether to capture any output (stdout/stderr), equivalent of pytest -s.
+## - Default False
+## extra (optional):
+## - Extra commands passed to subprocess before pytest execution
+## - This is helpful if you want to insert an environment var or even
+## run a shell command before executing, e.g. 'cd ../..'
+## returncode (optional):
+## - Instruct CE whether to return a return code.
+## - If 'True', the stdout is suppressed and a return code is sent to stdout.
+## - A zero return code means the overall execution has succeeded for the configuration submitted,
+## where a non-zero return code represents the number of failed tests.
+## - Default is False
+## Example:
+## $ ac --test-concurrent --host ec01130a --python 3.11 --zoau 1.3.0
+## $ ac --test-concurrent --host ec01130a --python 3.11 --zoau 1.3.0 --file test_zos_operator_func.py --debug true
+## $ ac --test-concurrent --host "ec01130a,ec33012a,ec33017a" --python 3.11 --zoau 1.3.0\
+## $ --file test_zos_operator_func.py,test_zos_job_submit_func.py\
+## $ --skip "test_zos_job_submit_func.py::test_job_from_gdg_source[0]" --debug true
+## $ ac --test-concurrent --host ec01130a --python 3.11 --zoau 1.3.0 --file test_zos_operator_func.py --returncode True --itr 1
+## $ ac --test-concurrent --host ec01130a --python 3.11 --zoau 1.3.1 --file test_zos_data_set_func.py --itr 1 --replay 1
+## test_case_1
+test_concurrent(){
+
+ # ----------------------------------------------------------------------------------------------------------------------------------
+ # CE -> AC -> AC vars -> var mapping -> defaults
+ # ----------------------------------------------------------------------------------------------------------------------------------
+ # --hostnames -> --host -> host=$1 -> pass through -> adhoc else auto discovered
+ # --user -> --user -> user=$2 -> pass through -> adhoc else auto discovered
+ # --pyz -> --python -> python=$3 -> pass through -> adhoc (auto translated to absolute path)
+ # --zoau -> --zoau -> zoau=$4 -> pass through -> adhoc (auto translated to absolute path)
+ # --pythonpath -> --pythonpath -> pythonpath=$5 -> pass through -> 'zoau/lib' or 'zoau/lib/'
+ # --volumes -> --volumes -> volumes=$6 -> pass through -> "222222,000000"
+ # --paths -> --file -> file=$7 -> pass through -> "functional/*,unit/*"
+ # --skip -> --skip -> skip=$8 -> pass through -> "test_module_security.py"
+ # --itr -> --itr -> itr=$9 -> pass through -> 50
+ # --replay -> --replay -> replay=$10 -> pass through -> 5
+ # --timeout -> --timeout -> timeout=$11 -> pass through -> 300
+ # --throttle -> --throttle -> throttle=$12 -> True = '--throttle', else '--no-throttle' -> True
+ # --workers -> --workers -> workers=$13 -> pass through -> 1
+ # --maxjob -> --maxjob -> maxjob=$14 -> pass through -> 10
+ # --maxnode -> --maxnode -> maxnode=$15 -> pass through -> 30
+ # --bal -> --bal -> bal=$16 -> pass through -> 10
+ # --verbose -> --verbose -> verbose=$17 -> True = '--verbose', else '--no-verbose' -> False
+ # --verbosity -> --verbosity -> verbosity=$18 -> pass through -> 0
+ # --capture -> --debug -> debug=$19 -> True = '--capture', else '--no-capture' -> False
+ # --extra -> --extra -> extra=$20 -> pass through -> "cd `pwd`"
+ # ----------------------------------------------------------------------------------------------------------------------------------
+
+ # echo "host=${1} user=${2} python=${3} zoau=${4} pythonpath=${5} volumes=${6} file=${7} skip=${8} itr=${9} replay=${10}"\
+ # "timeout=${11} throttle=${12} workers=${13} maxjob=${14} maxnode=${15} bal=${16} verbose=${17} verbosity=${18} debug=${19} extra=${20} returncode=${21}"
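+ # For example, a hypothetical invocation such as:
+ #   ./ac --test-concurrent --host ec01130a --python 3.11 --zoau 1.3.0 --throttle False
+ # arrives here as the positional arguments below and, per the mapping above, is
+ # forwarded to ce.py roughly as (paths illustrative, resolved by venv.sh and mounts.sh):
+ #   ce.py --hostnames <hostname> --pyz <python-path> --zoau <zoau-path> --no-throttle ...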
debug="${19}" + extra="${20}" + returncode="${21}" + + # Invoke shell script helpers to set variables if host is not null + if [ ! -z "${host}" ]; then + hostname=$($VENV/./venv.sh --host-credentials "${host}") + + if [ -z "${user}" ]; then + user=$($VENV/./venv.sh --user-credentials "${host}") + fi + + if [ -z "${pass}" ]; then + pass=$($VENV/./venv.sh --pass-credentials "${host}") + fi + + host=$hostname + fi + + # Convert the python from short notation to absolute path + python=$($VENV/./mounts.sh --get-python-mount "${python}") + + zoau=$($VENV/./mounts.sh --get-zoau-mount "${zoau}") + + # Build a zoau precompiled binary path if $pythonpath is null + if [ -z "${pythonpath}" ]; then + zoau_version=`echo ${zoau#*/v}` + if [ $(normalize_version "${zoau_version}") -lt 1003000000 ]; then + pythonpath=$zoau/lib + else + suffix=`echo ${python#*cyp/v}` + version=`echo ${suffix%*/pyz}` + pythonpath_version=`echo $version|sed 's/r/./g'` + pythonpath=$zoau/lib/$pythonpath_version + fi + fi + + first_entry=true + file_tests="" + strings_func=$(ac_test_pytest_finder "functional/*" "" true true) + strings_unit=$(ac_test_pytest_finder "unit/*" "" true true) + + corrected_file="" + # Support shorter notation passed to the utils module 'get_test_case()'', the python module + # does not support such notation because the module can be run from many locations and requires + # absolute paths. This notation is translated to absolute paths. + # TODO: Add support for invidual tests, --file test_load_balance_full.py::test_case_1, issue 1636 + for i in $(echo $file | sed "s/,/ /g") # Optionally: skip=\"`echo $2 | tr ',' ' '`\" + do + if [ "$i" == "functional/*" ];then + if [ "$first_entry" == "true" ];then + first_entry=false + file_tests="$CURR_DIR/tests/functional/modules/" + else + file_tests="$file_tests $CURR_DIR/tests/functional/modules/" + fi + elif [ "$i" == "unit/*" ];then + if [ "$first_entry" == "true" ];then + first_entry=false + file_tests="$CURR_DIR/tests/unit/" + else + file_tests="$file_tests $CURR_DIR/tests/unit/" + fi + elif echo $strings_func | tr ' ' '\n'|grep $i >/dev/null; then + if [ ! -e "$CURR_DIR/tests/functional/modules/$i" ]; then + message_error "File = $CURR_DIR/tests/functional/modules/$i not found." + fi + + if [ "$first_entry" == "true" ];then + first_entry=false + file_tests="$CURR_DIR/tests/functional/modules/$i" + else + file_tests="$file_tests $CURR_DIR/tests/functional/modules/$i" + fi + elif echo $strings_unit | tr ' ' '\n'|grep $i >/dev/null; then + if [ ! -e "$CURR_DIR/tests/unit/$i" ]; then + message_error "File = $CURR_DIR/tests/unit/$i not found." + fi + + if [ "$first_entry" == "true" ];then + first_entry=false + file_tests="$CURR_DIR/tests/unit/" + else + file_tests="$file_tests $CURR_DIR/tests/unit/$i" + fi + elif [[ $i == tests/functional/modules/* ]] || [[ $i == $CURR_DIR/tests/functional/modules/* ]] ;then + message_warn "It appears an absolute path has been used, 'ac' will try to truncate it to the test suite." + suffix=`echo ${i#*/modules/}` + + if [ "$first_entry" == "true" ];then + first_entry=false + file_tests="$CURR_DIR/tests/functional/modules/$suffix" + else + file_tests="$file_tests $CURR_DIR/tests/functional/modules/$suffix" + fi + elif [[ $i == tests/unit/* ]] || [[ $i == $CURR_DIR/tests/unit/* ]] ;then + message_warn "It appears an absolute path has been used, 'ac' will try to truncate it to the test suite." 
+ suffix=`echo ${i#*/unit/}`
+
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ file_tests="$CURR_DIR/tests/unit/$suffix"
+ else
+ file_tests="$file_tests $CURR_DIR/tests/unit/$suffix"
+ fi
+ else
+ message_error "File = $i, not found in project path = $CURR_DIR."
+ fi
+ done
+ first_entry=true
+ file=$file_tests
+
+ # Convert any comma separated strings to space delimited as needed by the tooling.
+ first_entry=true
+ skip_tests=""
+ for i in $(echo $skip | sed "s/,/ /g") # Optionally: skip=\"`echo $2 | tr ',' ' '`\"
+ do
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ skip_tests="$CURR_DIR/tests/functional/modules/$i"
+ else
+ skip_tests="$skip_tests $CURR_DIR/tests/functional/modules/$i"
+ fi
+ done
+ skip=$skip_tests
+
+ # Uppercase value for --throttle
+ throttle=`echo $throttle | tr '[:lower:]' '[:upper:]'`
+ if [ "$throttle" == "TRUE" ];then
+ throttle="--throttle"
+ else
+ throttle="--no-throttle"
+ fi
+
+ # Uppercase value for --verbose
+ verbose=`echo $verbose | tr '[:lower:]' '[:upper:]'`
+ if [ "$verbose" == "TRUE" ];then
+ verbose="--verbose"
+ else
+ verbose="--no-verbose"
+ fi
+
+ # Uppercase value for --capture
+ debug=`echo $debug | tr '[:lower:]' '[:upper:]'`
+ if [ "$debug" == "TRUE" ];then
+ debug="--capture"
+ else
+ debug="--no-capture"
+ fi
+
+ # Uppercase value for --returncode
+ returncode=`echo $returncode | tr '[:lower:]' '[:upper:]'`
+ if [ "$returncode" == "TRUE" ];then
+ returncode="--returncode"
+ else
+ returncode="--no-returncode"
+ fi
+
+ # Useful for debugging to see what is actually passed, i.e. what ./ac will pass to ce.py
+ # echo "host=${host} user=${user} python=${python} zoau=${zoau} pythonpath=${pythonpath} volumes=${volumes},"\
+ # "file=${file} skip=${skip} itr=${itr} replay=${replay} timeout=${timeout} throttle=${throttle}"\
+ # "workers=${workers} maxjob=${maxjob} maxnode=${maxnode} bal=${bal} verbose=${verbose}"\
+ # "verbosity=${verbosity} debug=${debug} extra=${extra} returncode=${returncode}"
+
+ # read _host _user _pass <<<$($VENV/./venv.sh --host-credentials "${host}")
+ message "Concurrent executor testing is evaluating supplied options and preparing to execute."
+ . $VENV_BIN/activate && python3 $VENV/ce.py\
+ --hostnames "${host}"\
+ --user "${user}"\
+ --pyz "${python}"\
+ --zoau "${zoau}"\
+ --pythonpath "${pythonpath}"\
+ --volumes "${volumes}"\
+ --paths "${file}"\
+ --skip "${skip}"\
+ --itr "${itr}"\
+ --replay "${replay}"\
+ --timeout "${timeout}"\
+ "${throttle}"\
+ --workers "${workers}"\
+ --maxjob "${maxjob}"\
+ --maxnode "${maxnode}"\
+ --bal "${bal}"\
+ "${verbose}"\
+ --verbosity "${verbosity}"\
+ "${debug}"\
+ --extra "${extra}"\
+ "${returncode}"
+}
 
# ------------------------------------------------------------------------------
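Putting the options together, a minimal invocation sketch drawn from the documented examples — the host value is a placeholder for a real lab system, and `--returncode True` switches stdout to a machine-readable result:

```sh
# A zero on stdout means overall success; a non-zero value is the failed-test count.
./ac --test-concurrent --host ec01130a --python 3.11 --zoau 1.3.0 \
     --file test_zos_operator_func.py --itr 1 --returncode True
```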
@@ -485,6 +885,203 @@ ac_test_config(){
 fi
 }
 
+# ------------------------------------------------------------------------------
+# Get a list of all test cases from the dependency finder depending on options
+# ------------------------------------------------------------------------------
+#->test-dep-find:
+## Determine which test suites to run given the options selected.
+## Usage: ac --test-dep-find [--branch ] [--skip ]
+## Options:
+## branch (optional):
+## - The branch to compare to when performing dependency analysis. The
+## comparison always uses the currently checked out local branch and
+## compares that to the 'branch' supplied.
+## - The default branch is 'dev'
+## skip (optional):
+## - Space or comma delimited test suites that should not be included
+## in the result.
+## - Supply only the test suite name, the tooling will prepend the
+## necessary path.
+## - Default is to skip 'test_module_security.py'; this cannot be removed but
+## it can be replaced with another test or tests.
+## pretty (optional):
+## - Pretty formatting where each value is a line followed by a line feed,
+## otherwise a list[str] format is returned.
+## Example:
+## $ ac --test-dep-find --branch main --skip "test_module_security.py,test_zos_apf_func.py" --pretty False
+## $ ac --test-dep-find --branch dev --skip "test_zos_apf_func.py"
+## $ ac --test-dep-find --branch main
+## $ ac --test-dep-find
+
+ac_test_dep_finder(){
+ branch=$1
+ skip="$2"
+ gh_branch=`git branch |grep "*" | cut -d" " -f2`
+
+ # Convert any comma separated strings to space delimited as needed by the tooling.
+ first_entry=true
+ skip_tests=""
+ for i in $(echo $skip | sed "s/,/ /g") # Optionally: skip=\"`echo $2 | tr ',' ' '`\"
+ do
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ skip_tests="$CURR_DIR/tests/functional/modules/$i"
+ else
+ skip_tests="$skip_tests $CURR_DIR/tests/functional/modules/$i"
+ fi
+ done
+ skip=$skip_tests
+
+ # If branch is defined, provide results based on that comparison branch, else default to the `dev` branch.
+ if [ -z "$branch" ]; then
+ branch="dev"
+ message "Compiling a list of functional and unit test suites excluding skipped tests."
+ . $VENV_BIN/activate && result=`$VENV_BIN/python ${VENV}/dependencyfinder.py -p ${CURRENT_DIR} -a -s "${skip}"`
+ else
+ message "Compiling a list of dependent test cases to run based on the changes between local branch '$gh_branch' and target branch '$branch', excluding skipped tests."
+ . $VENV_BIN/activate && result=`$VENV_BIN/python ${VENV}/dependencyfinder.py -p ${CURRENT_DIR} -b ${branch} -s "${skip}" -m`
+ fi
+
+ # Uppercase value for --pretty
+ pretty=`echo $3 | tr '[:lower:]' '[:upper:]'`
+ if [ "$pretty" == "TRUE" ];then
+ echo $result |tr '[[:space:]]' '\n'
+ else
+ export env_result=${result}
+ result=`$VENV_BIN/python3 -c "from os import environ;all = environ['env_result'].split(',');print(str(all))"`
+ echo $result
+ fi
+}
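The dependency finder complements the concurrent executor by narrowing the suite list before anything runs. A short sketch based on the documented examples (branch and skip values are illustrative):

```sh
# Test suites affected by changes between the local branch and 'main', one per line:
./ac --test-dep-find --branch main --skip "test_module_security.py,test_zos_apf_func.py"
# The same selection rendered as a Python-style list[str]:
./ac --test-dep-find --branch main --pretty False
```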
+
+# ------------------------------------------------------------------------------
+# Get a list of all test cases using pytest including parameterization
+# ------------------------------------------------------------------------------
+#->test-pytest-find:
+## Get a list of parameterized test cases used by pytest
+## Usage: ac --test-pytest-find [--file ] [--skip ] [--pretty ]
+## Options:
+## file (optional):
+## - Space or comma delimited test suites that should be included
+## in the result.
+## - A test suite is a collection of test cases in a file that starts with
+## 'test' and ends in '.py'.
+## - For all functional tests, use the `functional/*` notation for directories.
+## - For all unit tests, use the `unit/*` notation for directories.
+## - Default is all functional and unit tests.
+## - A directory of test cases is such that it contains test suites.
+## skip (optional) - (str):
+## - Space or comma delimited test suites that should be omitted
+## from the result.
+## - A test suite is a collection of test cases in a file that starts with
+## 'test' and ends in '.py'.
+## - Default is to skip 'test_module_security.py'; this cannot be removed but
+## it can be replaced with another test or tests.
+## - Test cases can be parametrized such that they use the '::' syntax or not.
+## - Skip does not support directories.
+## pretty (optional):
+## - Pretty formatting where each value is a line followed by a line feed,
+## otherwise a list[str] format is returned.
+## Example:
+## $ ac --test-pytest-find --file "test_zos_copy_func.py,test_zos_mvs_raw_unit.py" --skip "test_zos_job_submit_func.py,test_module_security.py" --pretty false
+## $ ac --test-pytest-find --file "functional/*,unit/*" --skip "test_module_security.py"
+## $ ac --test-pytest-find --file "test_zos_copy_func.py"
+## $ ac --test-pytest-find --pretty true
+## $ ac --test-pytest-find|wc -l
+ac_test_pytest_finder(){
+ file=$1
+ skip="$2"
+ silence_messages="$4" # Undocumented internal interface option to disable INFO messages
+
+ first_entry=true
+ file_tests=""
+ strings_func=$(ls "$CURR_DIR/tests/functional/modules/")
+ strings_unit=$(ls "$CURR_DIR/tests/unit/")
+
+ # As a courtesy, aid in shorter notation supplied to the utils module get_test_case(); the python module
+ # does not support such notation because the module can be run from many locations and requires absolute
+ # paths.
+ for i in $(echo $file | sed "s/,/ /g") # Optionally: skip=\"`echo $2 | tr ',' ' '`\"
+ do
+ if [ "$i" == "functional/*" ];then
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ file_tests="$CURR_DIR/tests/functional/modules/"
+ else
+ file_tests="$file_tests $CURR_DIR/tests/functional/modules/"
+ fi
+ elif [ "$i" == "unit/*" ];then
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ file_tests="$CURR_DIR/tests/unit/"
+ else
+ file_tests="$file_tests $CURR_DIR/tests/unit/"
+ fi
+ elif echo $strings_func | tr ' ' '\n'|grep $i >/dev/null; then
+ if [ ! -e "$CURR_DIR/tests/functional/modules/$i" ]; then
+ message_error "File = $CURR_DIR/tests/functional/modules/$i not found."
+ fi
+
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ file_tests="$CURR_DIR/tests/functional/modules/$i"
+ else
+ file_tests="$file_tests $CURR_DIR/tests/functional/modules/$i"
+ fi
+ elif echo $strings_unit | tr ' ' '\n'|grep $i >/dev/null; then
+ if [ ! -e "$CURR_DIR/tests/unit/$i" ]; then
+ message_error "File = $CURR_DIR/tests/unit/$i not found."
+ fi
+
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ file_tests="$CURR_DIR/tests/unit/$i"
+ else
+ file_tests="$file_tests $CURR_DIR/tests/unit/$i"
+ fi
+ else
+ message_error "File = $i, not found in project path = $CURR_DIR."
+ fi
+ done
+ first_entry=true
+ file=$file_tests
+ export env_file=${file}
+
+ # Convert any comma separated strings to space delimited as needed by the tooling.
+ first_entry=true
+ skip_tests=""
+ for i in $(echo $skip | sed "s/,/ /g") # Optionally: skip=\"`echo $2 | tr ',' ' '`\"
+ do
+ if [ "$first_entry" == "true" ];then
+ first_entry=false
+ skip_tests="$CURR_DIR/tests/functional/modules/$i"
+ else
+ skip_tests="$skip_tests $CURR_DIR/tests/functional/modules/$i"
+ fi
+ done
+
+ skip=$skip_tests
+ export env_skip=${skip}
+
+ silence_messages=`echo $silence_messages | tr '[:lower:]' '[:upper:]'`
+ if [ "$silence_messages" != "TRUE" ];then
+ message "Compiling a list of test cases for the provided test suites, excluding any skipped tests."
+ fi
+
+ . $VENV_BIN/activate && result=`export PYTHONPATH=$VENV;$VENV_BIN/python -c "from modules.utils import get_test_cases;from os import environ;\
+ tests = get_test_cases(paths=environ['env_file'], skip=environ['env_skip']);all=','.join(tests);print(all)"`
+
+ # Uppercase value for --pretty
+ pretty=`echo $3 | tr '[:lower:]' '[:upper:]'`
+ if [ "$pretty" == "TRUE" ];then
+ echo $result |tr ',' '\n';
+ else
+ export env_result=${result}
+ result=`$VENV_BIN/python3 -c "from os import environ;all = environ['env_result'].split(',');print(str(all))"`
+ echo $result
+ fi
+}
+
 # ------------------------------------------------------------------------------
 # Check the version of the ibm_zos_core collection installed
 # ------------------------------------------------------------------------------
@@ -637,16 +1234,28 @@ host_mounts(){
 }
 
 # ------------------------------------------------------------------------------
-# Print the managed z/OS node IDs
+# Print the z/OS node IDs and hostnames
 # ------------------------------------------------------------------------------
 #->host-nodes:
-## Display the z/OS managed node IDs.
-## Usage: ac [--host-nodes]
+## Display the z/OS node IDs and hostnames
+## Usage: ac [--host-nodes --all ]
+## Options:
+## all - A list of all nodes, default is true. If all is set to false,
+## only a space delimited list of nodes is returned.
 ## Example:
 ## $ ac --host-nodes
+## $ ac --host-nodes --all false
 host_nodes(){
- message "Print local managed node IDs."
- $VENV/venv.sh --targets
+
+ if [ "$all" == "false" ]; then
+ message "Print z/OS production hostnames."
+ result=`$VENV/venv.sh --targets-production`
+ else
+ message "Print z/OS node IDs and hostnames."
+ result=`$VENV/venv.sh --targets`
+ fi
+ echo $result
 }
 
 # ------------------------------------------------------------------------------
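With the new `--all` flag, the node listing can be narrowed to production hostnames only; a quick sketch of both forms from the help text above (actual output depends on the local venv.sh inventory):

```sh
./ac --host-nodes              # node IDs and hostnames
./ac --host-nodes --all false  # space delimited production hostnames only
```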
@@ -672,7 +1281,6 @@ venv_setup(){
 
 # ------------------------------------------------------------------------------
 # Allows you to activate the latest ansible managed virtual environments
-# TODO: Allow user to specify which venv they can start
 # ------------------------------------------------------------------------------
 #->venv-start:
 ## Activate the latest ansible managed virtual environment or optionally start
@@ -695,12 +1303,11 @@ venv_start(){
 
 message "Starting managed python virtual environment: $VENV_BASENAME"
 #. $VENV_BIN/activate; exec /bin/sh -i
- /bin/bash -c ". $VENV_BIN/activate; exec /bin/sh -i"
+ /bin/bash -c ". $VENV_BIN/activate; exec /bin/sh -i;"
 }
 
 # ------------------------------------------------------------------------------
 # Allows you to deactivate the latest ansible managed virtual environments
-# TODO: Allow user to specify which venv they can stop
 # ------------------------------------------------------------------------------
 #->venv-stop:
 ## Deactivate the latest ansible managed virtual environment or optionally deactivate
@@ -722,8 +1329,13 @@ venv_stop(){
 fi
 
 message "Stopping managed ansible virtual environment located at: $VENV_BASENAME"
- message "ac --venv-stop does not actually currently work, use CNTL-D"
- . deactivate $VENV_BASENAME 2>/dev/null;
+ # message "ac --venv-stop does not actually currently work, use CNTL-D"
+ # . deactivate $VENV_BASENAME;
+ # deactivate venv/$VENV_BASENAME
+ venv_tty=`tty`
+ venv_tty=`basename $venv_tty`
+ venv_pid=`ps -ef |grep $venv_tty | grep -v "grep" | grep "/bin/sh -i" | awk '{print $3}'`
+ kill -9 $venv_pid > /dev/null 2>&1
 }
 
 # ==============================================================================
@@ -749,35 +1361,35 @@ while true; do
 fi
 exit
 ;;
- --ac-bandit)   # Command
+ --ac-bandit)                    # Command
 ensure_managed_venv_exists $1
 option_submitted="--ac-bandit"
 ;;
- --ac-build)   # Command
+ --ac-build)                     # Command
 ensure_managed_venv_exists $1
 option_submitted="--ac-build"
 ;;
- --ac-galaxy-importer)   # Command
+ --ac-galaxy-importer)           # Command
 ensure_managed_venv_exists $1
 option_submitted="--ac-galaxy-importer"
 ;;
- --ac-changelog)   # Command
+ --ac-changelog)                 # Command
 ensure_managed_venv_exists $1
 option_submitted="--ac-changelog"
 ;;
- --ac-module-doc)   # Command
+ --ac-module-doc)                # Command
 ensure_managed_venv_exists $1
 option_submitted="--ac-module-doc"
 ;;
- --ac-install)
- ensure_managed_venv_exists $1   # Command
+ --ac-install)                   # Command
+ ensure_managed_venv_exists $1
 option_submitted="--ac-install"
 ;;
- --ac-lint)
- ensure_managed_venv_exists $1   # Command
+ --ac-lint)                      # Command
+ ensure_managed_venv_exists $1
 option_submitted="--ac-lint"
 ;;
- --ac-sanity |--ac-sanity=?*)   # Command
+ --ac-sanity |--ac-sanity=?*)    # Command
 ensure_managed_venv_exists $1
 option_submitted="--ac-sanity"
 ;;
@@ -789,6 +1401,18 @@ while true; do
 ensure_managed_venv_exists $1
 option_submitted="--ac-test-config"
 ;;
+ --test-concurrent|--test-concurrent=?*) # command
+ ensure_managed_venv_exists $1
+ option_submitted="--test-concurrent"
+ ;;
+ --test-dep-find|--test-dep-find=?*) # command
+ ensure_managed_venv_exists $1
+ option_submitted="--test-dep-find"
+ ;;
+ --test-pytest-find|--test-pytest-find=?*) # command
+ ensure_managed_venv_exists $1
+ option_submitted="--test-pytest-find"
+ ;;
 --ac-version)   # Command
 ensure_managed_venv_exists $1
 option_submitted="--ac-version"
@@ -805,7 +1429,7 @@ while true; do
 ensure_managed_venv_exists $1
 option_submitted="--host-auth"
 ;;
- --host-config)   # Command
+ --host-config)                  # Command
 ensure_managed_venv_exists $1
 option_submitted="--host-config"
 ;;
@@ -813,8 +1437,8 @@ while true; do
 ensure_managed_venv_exists $1
 option_submitted="--host-mount"
 ;;
- --host-mounts)
- ensure_managed_venv_exists $1   # Command
+ --host-mounts)                  # Command
+ ensure_managed_venv_exists $1
 option_submitted="--host-mounts"
 ;;
 --host-nodes)   # Command
@@ -832,16 +1456,36 @@ while true; do
 ensure_managed_venv_exists $1
 option_submitted="--venv-stop"
 ;;
+ --all|--all=?*) # option
+ all=`option_processor $1 $2`
+ option_sanitize $all
+ shift
+ ;;
+ --bal|--bal=?*) # option
+ bal=`option_processor $1 $2`
+ option_sanitize $bal
+ shift
+ ;;
 --command|--command=?*) # option
 command=`option_processor $1 $2`
 option_sanitize $command
 shift
 ;;
+ --branch|--branch=?*) # option
+ branch=`option_processor $1 $2`
+ option_sanitize $branch
+ shift
+ ;;
 --debug|--debug=?*) # option
 debug=`option_processor $1 $2`
 option_sanitize $debug
 shift
 ;;
+ --extra|--extra=?*) # option
+ extra=`option_processor $1 $2`
+ option_sanitize $extra
+ shift
+ ;;
 --file|--file=?*) # option
 file=`option_processor $1 $2`
 option_sanitize $file
 shift
 ;;
@@ -852,12 +1496,27 @@
 option_sanitize $host
 shift
 ;;
+ --itr|--itr=?*) # option
+ itr=`option_processor $1 $2`
+ option_sanitize $itr
+ shift
+ ;;
 --level|--level=?*) # option
 level=`option_processor $1 $2`
 option_sanitize $level
 shift
 ;;
- --name|--name=?*) # option
+ --maxjob|--maxjob=?*) # option
+ maxjob=`option_processor $1 $2`
+ option_sanitize $maxjob
+ shift
+ ;;
+ --maxnode|--maxnode=?*) # option
+ maxnode=`option_processor $1 $2`
+ option_sanitize $maxnode
+ shift
+ ;;
+ --name|--name=?*) # option
 name=`option_processor $1 $2`
 option_sanitize $name
 shift
@@ -872,26 +1536,86 @@
 option_sanitize $password
 shift
 ;;
+ --pretty|--pretty=?*) # option
+ pretty=`option_processor $1 $2`
+ option_sanitize $pretty
+ shift
+ ;;
 --python|--python=?*) # option
 python=`option_processor $1 $2`
 option_sanitize $python
 shift
 ;;
+ --pythonpath|--pythonpath=?*) # option
+ pythonpath=`option_processor $1 $2`
+ option_sanitize $pythonpath
+ shift
+ ;;
+ --replay|--replay=?*) # option
+ replay=`option_processor $1 "$2"`
+ option_sanitize "$replay"
+ shift
+ ;;
+ --returncode|--returncode=?*) # option
+ returncode=`option_processor $1 "$2"`
+ option_sanitize "$returncode"
+ shift
+ ;;
+ --skip|--skip=?*) # option
+ skip=`option_processor $1 "$2"`
+ option_sanitize "$skip"
+ shift
+ ;;
 --test|--test=?*) # option
 test=`option_processor $1 $2`
 option_sanitize $test
 shift
 ;;
- # --tests|--tests=?*) # option
+ --timeout|--timeout=?*) # option
+ timeout=`option_processor $1 $2`
+ option_sanitize $timeout
+ shift
+ ;;
+ --throttle|--throttle=?*) # option
+ throttle=`option_processor $1 $2`
+ option_sanitize $throttle
+ shift
+ ;;
+ # --tests|--tests=?*) # option
 # tests=`option_processor $1 $2`
 # option_sanitize $tests
 # shift
 # ;;
+ --user|--user=?*) # option
+ user=`option_processor $1 $2`
+ option_sanitize $user
+ shift
+ ;;
+ --verbose|--verbose=?*) # option
+ verbose=`option_processor $1 $2`
+ option_sanitize $verbose
+ shift
+ ;;
+ --verbosity|--verbosity=?*) # option
+ verbosity=`option_processor $1 $2`
+ option_sanitize $verbosity
+ shift
+ ;;
 --version|--version=?*) # option
 version=`option_processor $1 $2`
 option_sanitize $version
 shift
 ;;
+ --workers|--workers=?*) # option
+ workers=`option_processor $1 $2`
+ option_sanitize $workers
+ shift
+ ;;
+ --volumes|--volumes=?*) # option
+ volumes=`option_processor $1 $2`
+ option_sanitize $volumes
+ shift
+ ;;
 --zoau|--zoau=?*) # option
 zoau=`option_processor $1 $2`
 option_sanitize $zoau
@@ -940,8 +1664,19 @@
 elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-sanity" ] ; then
 ac_sanity $version
 elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-test" ] ; then
 ac_test ${host:=""} ${python:=""} ${zoau:=""} ${file:=""} ${test:=""} ${debug:=""}
+elif [ "$option_submitted" ] && [ "$option_submitted" = "--test-concurrent" ] ; then
+ test_concurrent ${host:=""} ${user:=""} ${python:=""} ${zoau:=""} ${pythonpath:=""}\
+ ${volumes:="222222,000000"} ${file:="functional/*,unit/*"} "${skip:="test_module_security.py"}"\
+ ${itr:="50"} ${replay:="5"} ${timeout:="300"} ${throttle:="True"} ${workers:="1"}\
+ ${maxjob:="10"} ${maxnode:="30"} ${bal:="10"} ${verbose:="False"} ${verbosity:="0"}\
+ ${debug:="False"} ${extra:="cd `pwd`"} ${returncode:="False"}
 elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-test-config" ] ; then
 ac_test_config
+elif [ "$option_submitted" ] && [ "$option_submitted" = "--test-dep-find" ] ; then
+ ac_test_dep_finder ${branch:=""} "${skip:="test_module_security.py"}" ${pretty:="true"}
+elif [ "$option_submitted" ] && [ "$option_submitted" = "--test-pytest-find" ] ; then
+ ac_test_pytest_finder ${file:="functional/*,unit/*"} "${skip:="test_module_security.py"}"\
${pretty:="true"} elif [ "$option_submitted" ] && [ "$option_submitted" = "--ac-version" ] ; then ac_version elif [ "$option_submitted" ] && [ "$option_submitted" = "--file-encrypt" ] ; then @@ -955,7 +1690,7 @@ elif [ "$option_submitted" ] && [ "$option_submitted" = "--host-mount" ] ; then elif [ "$option_submitted" ] && [ "$option_submitted" = "--host-mounts" ] ; then host_mounts elif [ "$option_submitted" ] && [ "$option_submitted" = "--host-nodes" ] ; then - host_nodes + host_nodes ${all} elif [ "$option_submitted" ] && [ "$option_submitted" = "--venv-setup" ] ; then venv_setup $password elif [ "$option_submitted" ] && [ "$option_submitted" = "--venv-start" ] ; then diff --git a/ansible.cfg b/ansible.cfg index a6d62f711..b452495ff 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -42,3 +42,6 @@ pipelining = True [colors] verbose = green + +[persistent_connection] +command_timeout = 60 diff --git a/changelogs/.plugin-cache.yaml b/changelogs/.plugin-cache.yaml index e5bd167b7..dcc631cd0 100644 --- a/changelogs/.plugin-cache.yaml +++ b/changelogs/.plugin-cache.yaml @@ -135,4 +135,4 @@ plugins: strategy: {} test: {} vars: {} -version: 1.10.0-beta.1 +version: 1.11.0-beta.1 diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 4d9648079..3c48425d7 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -259,6 +259,131 @@ releases: - 992-fix-sanity4to6.yml - v1.10.0-beta.1_summary.yml release_date: '2024-05-08' + 1.11.0-beta.1: + changes: + bugfixes: + - module_util/data_set.py - DataSet.data_set_cataloged function previously only + returned True or False, but failed to account for exceptions which occurred + during the LISTCAT. The fix now raises an MVSCmdExecError if the return code + from LISTCAT is too high. (https://github.com/ansible-collections/ibm_zos_core/pull/1535). + - zos_copy - a regression in version 1.4.0 made the module stop automatically + computing member names when copying a single file into a PDS/E. Fix now lets + a user copy a single file into a PDS/E without adding a member in the dest + option. (https://github.com/ansible-collections/ibm_zos_core/pull/1570). + - zos_copy - module would use opercmd to check if a non existent destination + data set is locked. Fix now only checks if the destination is already present. + (https://github.com/ansible-collections/ibm_zos_core/pull/1623). + - zos_job_submit - Was not propagating any error types UnicodeDecodeError, JSONDecodeError, + TypeError, KeyError when encountered, now the error message shares the type + error. (https://github.com/ansible-collections/ibm_zos_core/pull/1560). + - zos_mvs_raw - DD_output first character from each line was missing. Change + now includes the first character of each line. (https://github.com/ansible-collections/ibm_zos_core/pull/1543). + minor_changes: + - zos_apf - Change input to auto-escape 'library' names containing symbols (https://github.com/ansible-collections/ibm_zos_core/pull/1493). + - zos_archive - Added support for GDG and GDS relative name notation to archive + data sets. Added support for data set names with special characters like $, + /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1511). + - zos_backup_restore - Added support for GDS relative name notation to include or + exclude data sets when operation is backup. Added support for data set names + with special characters like $, /#, and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1527). 
+      - zos_blockinfile - Added support for GDG and GDS relative name notation to
+        use a data set, and to create backups as new generations. Added support for data set
+        names with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1516).
+      - zos_copy - add support for copying generation data sets (GDS) and generation
+        data groups (GDG), as well as using a GDS for backup. (https://github.com/ansible-collections/ibm_zos_core/pull/1564).
+      - zos_data_set - Added support for GDG and GDS relative name notation to create,
+        delete, catalog and uncatalog a data set. Added support for data set names
+        with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1504).
+      - zos_encode - add support for encoding generation data sets (GDS), as well
+        as using a GDS for backup. (https://github.com/ansible-collections/ibm_zos_core/pull/1531).
+      - zos_fetch - add support for fetching generation data groups and generation
+        data sets. (https://github.com/ansible-collections/ibm_zos_core/pull/1519)
+      - zos_find - added support for GDG/GDS and special characters (https://github.com/ansible-collections/ibm_zos_core/pull/1518).
+      - zos_job_submit - Improved the copy to remote mechanism to avoid using deepcopy
+        that could result in failure for some systems. (https://github.com/ansible-collections/ibm_zos_core/pull/1561).
+      - zos_job_submit - add support for generation data groups and generation data
+        sets as sources for jobs. (https://github.com/ansible-collections/ibm_zos_core/pull/1497)
+      - zos_lineinfile - Added support for GDG and GDS relative name notation to use
+        a data set, and to create backups as new generations. Added support for data set names
+        with special characters like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1516).
+      - zos_mount - Added support for data set names with special characters ($, /#,
+        /- and @). This is for both src and backup data set names. (https://github.com/ansible-collections/ibm_zos_core/pull/1631).
+      - zos_mvs_raw - Added support for GDG and GDS relative name notation to use
+        a data set. Added support for data set names with special characters like
+        $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1525).
+      - zos_mvs_raw - Added support for GDG and GDS relative positive name notation
+        to use a data set. (https://github.com/ansible-collections/ibm_zos_core/pull/1541).
+      - zos_mvs_raw - Redesigned the dd class wrappers to use their arguments properly.
+        (https://github.com/ansible-collections/ibm_zos_core/pull/1470).
+      - zos_tso_command - Added support for GDG and GDS relative name notation to use
+        a data set name. Added support for data set names with special characters
+        like $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1563).
+      - zos_script - Improved the copy to remote mechanism to avoid using deepcopy
+        that could result in failure for some systems. (https://github.com/ansible-collections/ibm_zos_core/pull/1561).
+      - zos_unarchive - Added support for data set names with special characters like
+        $, /#, /- and @. (https://github.com/ansible-collections/ibm_zos_core/pull/1511).
+      - zos_unarchive - Improved the copy to remote mechanism to avoid using deepcopy
+        that could result in failure for some systems. (https://github.com/ansible-collections/ibm_zos_core/pull/1561).
+    release_summary: 'Release Date: ''2024-08-05''
+
+      This changelog describes all changes made to the modules and plugins included
+
+      in this collection. The release date is the date the changelog is created.
+
+      For additional details such as required dependencies and availability review
+
+      the collections `release notes `__'
+    fragments:
+    - 1170-enhancememt-make-pipeline-217-compatible.yml
+    - 1323-Update_docstring-dd_statement.yml
+    - 1334-update-docstring-mcs_cmd.yml
+    - 1335-update-docstring-template.yml
+    - 1337-update-docstring-vtoc.yml
+    - 1338-update-docstring-zoau_version_checker.yml
+    - 1342-update-docstring-zos_backup_restore.yml
+    - 1343-update-docstring-zos_blockinline.yml
+    - 1344-update-docstring-zos_copy.yml
+    - 1361-update-docstring-zos_operator.yml
+    - 1362-update-docstring-file.yml
+    - 1363-update-docstring-system.yml
+    - 1374-enhancement-zos-find-gdg-gds-special-chars.yml
+    - 1380-enhancement-add-sybols-zos_apf.yml
+    - 1384-update-docstring-backup.yml
+    - 1385-update-docstring-better_arg_parser.yml
+    - 1386-gdg-symbols-support.yml
+    - 1387-update-docstring-copy.yml
+    - 1415-Update_docstring-zos_archive.yml
+    - 1470-redesign_mvs_raw.yml
+    - 1484-update-ac-tool-ansible-lint.yml
+    - 1488-zos_copy-refactor-force.yml
+    - 1495-default-values-data-set-class.yml
+    - 1496-fix-gds-resolve.yml
+    - 1497-gdg-support-zos-job-submit.yml
+    - 1504-zos_data_set-gdg-support.yml
+    - 1507-zos_operator-docs.yml
+    - 1511-zos_archive_unarchive-gdg-support.yml
+    - 1512-bugfix-zos_job_submit-error-type.yml
+    - 1515-gdg_batch_creation.yml
+    - 1516-lineinfile_blockinfile_gdgsgds_and_special_character_support.yml
+    - 1519-zos_fetch-gdg-support.yml
+    - 1525-mvs_raw_support_gdg_gds_special_character.yml
+    - 1527-zos_backup-gdg.yml
+    - 1531-zos_encode_gdg_support.yml
+    - 1535-raise-error-in-module-util-data_set-function-data_set_cataloged.yml
+    - 1541-output_mvs_raw_gds_positive_was_false_positive.yml
+    - 1543-mvs_raw_fix_verbose_and_first_character.yml
+    - 1550-lower_case_idcams_utility.yml
+    - 1552-readme-support-updates.yml
+    - 1553-Console_parallel.yml
+    - 1561-remove_deep_copy.yml
+    - 1563-zos_tso_command-gdg-support.yml
+    - 1564-zos_copy_gdg_support.yml
+    - 1565-remove-deprecated-pipes-library.yml
+    - 1570-compute-member-name-zos_copy.yml
+    - 1623-zos_copy-avoid-opercmd.yml
+    - 1631-enabler-zos_mount-special-character-support.yml
+    - v1.11.0-beta.1_summary.yml
+    release_date: '2024-08-05'
 1.2.1:
 changes:
 bugfixes:
diff --git a/changelogs/fragments/1641-case-sensitivity-zos_operator.yml b/changelogs/fragments/1641-case-sensitivity-zos_operator.yml
new file mode 100644
index 000000000..1079776f9
--- /dev/null
+++ b/changelogs/fragments/1641-case-sensitivity-zos_operator.yml
@@ -0,0 +1,4 @@
+minor_changes:
+  - zos_operator - Added new option ``case_sensitive`` to module, allowing users
+    to control how the module handles case in a command.
+    (https://github.com/ansible-collections/ibm_zos_core/pull/1641)
\ No newline at end of file
diff --git a/changelogs/fragments/1658-job_submit_portability.yml b/changelogs/fragments/1658-job_submit_portability.yml
new file mode 100644
index 000000000..83aeb281d
--- /dev/null
+++ b/changelogs/fragments/1658-job_submit_portability.yml
@@ -0,0 +1,3 @@
+trivial:
+  - zos_job_submit - Remove the use of hard-coded data set and file names.
+    (https://github.com/ansible-collections/ibm_zos_core/pull/1658).
diff --git a/changelogs/fragments/1658-job_submit_portability.yml b/changelogs/fragments/1658-job_submit_portability.yml
new file mode 100644
index 000000000..83aeb281d
--- /dev/null
+++ b/changelogs/fragments/1658-job_submit_portability.yml
@@ -0,0 +1,3 @@
+trivial:
+  - zos_job_submit - Removed the use of hard-coded data set and file names.
+    (https://github.com/ansible-collections/ibm_zos_core/pull/1658).
\ No newline at end of file
diff --git a/changelogs/fragments/828-adds-concurrent-executor.yml b/changelogs/fragments/828-adds-concurrent-executor.yml
new file mode 100644
index 000000000..9da97836e
--- /dev/null
+++ b/changelogs/fragments/828-adds-concurrent-executor.yml
@@ -0,0 +1,7 @@
+trivial:
+  - ce.py - Adds the concurrent executor capable of running
+    test cases concurrently against a pool of managed nodes.
+    (https://github.com/ansible-collections/ibm_zos_core/pull/828).
+  - zinventory-raw - a new pytest fixture that can accept a JSON
+    string instead of a configuration file.
+    (https://github.com/ansible-collections/ibm_zos_core/pull/828).
diff --git a/changelogs/fragments/v1.10.0_summary.yml b/changelogs/fragments/v1.11.0-beta.1_summary.yml
similarity index 92%
rename from changelogs/fragments/v1.10.0_summary.yml
rename to changelogs/fragments/v1.11.0-beta.1_summary.yml
index 129c40746..5c1d60f94 100644
--- a/changelogs/fragments/v1.10.0_summary.yml
+++ b/changelogs/fragments/v1.11.0-beta.1_summary.yml
@@ -1,5 +1,5 @@
 release_summary: |
-  Release Date: '2024-06-11'
+  Release Date: '2024-08-05'
   This changelog describes all changes made to the modules and plugins included
   in this collection. The release date is the date the changelog is created.
   For additional details such as required dependencies and availability review
diff --git a/docs/source/modules/zos_apf.rst b/docs/source/modules/zos_apf.rst
index 265d3fff5..a94fdc95e 100644
--- a/docs/source/modules/zos_apf.rst
+++ b/docs/source/modules/zos_apf.rst
@@ -37,7 +37,7 @@ library

 state
-  Ensure that the library is added \ :literal:`state=present`\ or removed \ :literal:`state=absent`\ .
+  Ensure that the library is added ``state=present`` or removed ``state=absent``.

   The APF list format has to be "DYNAMIC".

@@ -58,24 +58,24 @@ force_dynamic

 volume
-  The identifier for the volume containing the library specified in the \ :literal:`library`\ parameter. The values must be one the following.
+  The identifier for the volume containing the library specified in the ``library`` parameter. The values must be one of the following.

   1. The volume serial number.

-  2. Six asterisks \ :literal:`\*\*\*\*\*\*`\ , indicating that the system must use the volume serial number of the current system residence (SYSRES) volume.
+  2. Six asterisks ``******``, indicating that the system must use the volume serial number of the current system residence (SYSRES) volume.

-  3. \*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog.
+  3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog.

-  If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged.
+  If ``volume`` is not specified, ``library`` has to be cataloged.

  | **required**: False
  | **type**: str

 sms
-  Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library.
+  Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library.

-  If \ :literal:`sms=True`\ , \ :literal:`volume`\ value will be ignored.
+  If ``sms=True``, ``volume`` value will be ignored.
  | **required**: False
  | **type**: bool

@@ -83,13 +83,13 @@ sms

 operation
-  Change APF list format to "DYNAMIC" \ :literal:`operation=set\_dynamic`\ or "STATIC" \ :literal:`operation=set\_static`\ 
+  Change APF list format to "DYNAMIC" ``operation=set_dynamic`` or "STATIC" ``operation=set_static``

-  Display APF list current format \ :literal:`operation=check\_format`\ 
+  Display APF list current format ``operation=check_format``

-  Display APF list entries when \ :literal:`operation=list`\ \ :literal:`library`\ , \ :literal:`volume`\ and \ :literal:`sms`\ will be used as filters.
+  Display APF list entries when ``operation=list``; ``library``, ``volume`` and ``sms`` will be used as filters.

-  If \ :literal:`operation`\ is not set, add or remove operation will be ignored.
+  If ``operation`` is not set, the add or remove operation will be ignored.

  | **required**: False
  | **type**: str

@@ -99,23 +99,23 @@ operation

 tmp_hlq
   Override the default high level qualifier (HLQ) for temporary and backup datasets.

-  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used.
+  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used.

  | **required**: False
  | **type**: str

 persistent
-  Add/remove persistent entries to or from \ :emphasis:`data\_set\_name`\ 
+  Add/remove persistent entries to or from *data_set_name*

-  \ :literal:`library`\ will not be persisted or removed if \ :literal:`persistent=None`\ 
+  ``library`` will not be persisted or removed if ``persistent=None``

  | **required**: False
  | **type**: dict

 data_set_name
-  The data set name used for persisting or removing a \ :literal:`library`\ from the APF list.
+  The data set name used for persisting or removing a ``library`` from the APF list.

  | **required**: True
  | **type**: str

@@ -124,13 +124,13 @@ persistent

 marker
   The marker line template.

-  \ :literal:`{mark}`\ will be replaced with "BEGIN" and "END".
+  ``{mark}`` will be replaced with "BEGIN" and "END".

-  Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs.
+  Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs.

-  \ :literal:`{mark}`\ length may not exceed 72 characters.
+  ``{mark}`` length may not exceed 72 characters.

-  The timestamp (\) used in the default marker follows the '+%Y%m%d-%H%M%S' date format
+  The timestamp () used in the default marker follows the '+%Y%m%d-%H%M%S' date format

  | **required**: False
  | **type**: str

@@ -138,9 +138,9 @@ persistent

 backup
-  Creates a backup file or backup data set for \ :emphasis:`data\_set\_name`\ , including the timestamp information to ensure that you retrieve the original APF list defined in \ :emphasis:`data\_set\_name`\ ".
+  Creates a backup file or backup data set for *data_set_name*, including the timestamp information to ensure that you retrieve the original APF list defined in *data_set_name*.

-  \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ .
+  *backup_name* can be used to specify a backup file name if *backup=true*.

   The backup file name will be returned on either success or failure of module execution such that data can be retrieved.
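To make the persistent/marker plumbing above concrete, a hedged sketch (the library, volume and data set names are hypothetical; the marker must contain ``{mark}`` and stay within 72 characters):

    - name: Add a library to the APF list and persist the entry
      zos_apf:
        library: SOME.AUTH.LOADLIB
        volume: T12345
        persistent:
          data_set_name: SOME.PARMLIB.DATA
          marker: "/* {mark} ANSIBLE MANAGED BLOCK */"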
@@ -152,11 +152,11 @@ persistent

 backup_name
   Specify the USS file name or data set name for the destination backup.

-  If the source \ :emphasis:`data\_set\_name`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name.
+  If the source *data_set_name* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name.

-  If the source is an MVS data set, the backup\_name must be an MVS data set name.
+  If the source is an MVS data set, the backup_name must be an MVS data set name.

-  If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ .
+  If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``.

   If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it.

@@ -168,9 +168,9 @@ persistent

 batch
   A list of dictionaries for adding or removing libraries.

-  This is mutually exclusive with \ :literal:`library`\ , \ :literal:`volume`\ , \ :literal:`sms`\ 
+  This is mutually exclusive with ``library``, ``volume``, ``sms``.

-  Can be used with \ :literal:`persistent`\ 
+  Can be used with ``persistent``.

  | **required**: False
  | **type**: list

@@ -185,24 +185,24 @@ batch

 volume
-  The identifier for the volume containing the library specified on the \ :literal:`library`\ parameter. The values must be one of the following.
+  The identifier for the volume containing the library specified on the ``library`` parameter. The values must be one of the following.

   1. The volume serial number

-  2. Six asterisks \ :literal:`\*\*\*\*\*\*`\ , indicating that the system must use the volume serial number of the current system residence (SYSRES) volume.
+  2. Six asterisks ``******``, indicating that the system must use the volume serial number of the current system residence (SYSRES) volume.

-  3. \*MCAT\*, indicating that the system must use the volume serial number of the volume containing the master catalog.
+  3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog.

-  If \ :literal:`volume`\ is not specified, \ :literal:`library`\ has to be cataloged.
+  If ``volume`` is not specified, ``library`` has to be cataloged.

  | **required**: False
  | **type**: str

 sms
-  Indicates that the library specified in the \ :literal:`library`\ parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library.
+  Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library.

-  If true \ :literal:`volume`\ will be ignored.
+  If true, ``volume`` will be ignored.

  | **required**: False
  | **type**: bool

@@ -283,9 +283,9 @@ Return Values

 stdout
   The stdout from ZOAU command apfadm. Output varies based on the type of operation.
-  state\> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm
+  state> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm

-  operation\> stdout of operation options list\> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] set\_dynamic\> Set to DYNAMIC set\_static\> Set to STATIC check\_format\> DYNAMIC or STATIC
+  operation> stdout of operation options list> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] set_dynamic> Set to DYNAMIC set_static> Set to STATIC check_format> DYNAMIC or STATIC

  | **returned**: always
  | **type**: str

diff --git a/docs/source/modules/zos_archive.rst b/docs/source/modules/zos_archive.rst
index b900fdcdb..bca1c5e82 100644
--- a/docs/source/modules/zos_archive.rst
+++ b/docs/source/modules/zos_archive.rst
@@ -20,7 +20,7 @@ Synopsis
 - Sources for archiving must be on the remote z/OS system.
 - Supported sources are USS (UNIX System Services) or z/OS data sets.
 - The archive remains on the remote z/OS system.
-- For supported archive formats, see option \ :literal:`format`\ .
+- For supported archive formats, see option ``format``.

@@ -35,7 +35,9 @@ src
   USS file paths should be absolute paths.

-  MVS data sets supported types are: \ :literal:`SEQ`\ , \ :literal:`PDS`\ , \ :literal:`PDSE`\ .
+  GDS relative notation is supported.
+
+  MVS data sets supported types are: ``SEQ``, ``PDS``, ``PDSE``.

   VSAMs are not supported.

@@ -68,7 +70,7 @@ format

 terse_pack
-  Compression option for use with the terse format, \ :emphasis:`name=terse`\ .
+  Compression option for use with the terse format, *name=terse*.

   Pack will compress records in a data set so that the output results in lossless data compression.

@@ -88,14 +90,14 @@ format
   If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB

-  When providing the \ :emphasis:`xmit\_log\_data\_set`\ name, ensure there is adequate space.
+  When providing the *xmit_log_data_set* name, ensure there is adequate space.

  | **required**: False
  | **type**: str

 use_adrdssu
-  If set to true, the \ :literal:`zos\_archive`\ module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using \ :literal:`xmit`\ or \ :literal:`terse`\ .
+  If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using ``xmit`` or ``terse``.

  | **required**: False
  | **type**: bool

@@ -107,30 +109,30 @@ format

 dest
   The remote absolute path or data set where the archive should be created.

-  \ :emphasis:`dest`\ can be a USS file or MVS data set name.
+  *dest* can be a USS file or MVS data set name.

-  If \ :emphasis:`dest`\ has missing parent directories, they will be created.
+  If *dest* has missing parent directories, they will be created.

-  If \ :emphasis:`dest`\ is a nonexistent USS file, it will be created.
+  If *dest* is a nonexistent USS file, it will be created.
-  If \ :emphasis:`dest`\ is an existing file or data set and \ :emphasis:`force=true`\ , the existing \ :emphasis:`dest`\ will be deleted and recreated with attributes defined in the \ :emphasis:`dest\_data\_set`\ option or computed by the module.
+  If *dest* is an existing file or data set and *force=true*, the existing *dest* will be deleted and recreated with attributes defined in the *dest_data_set* option or computed by the module.

-  If \ :emphasis:`dest`\ is an existing file or data set and \ :emphasis:`force=false`\ or not specified, the module exits with a note to the user.
+  If *dest* is an existing file or data set and *force=false* or not specified, the module exits with a note to the user.

-  Destination data set attributes can be set using \ :emphasis:`dest\_data\_set`\ .
+  Destination data set attributes can be set using *dest_data_set*.

-  Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the \ :emphasis:`dest\_data\_set`\ option will improve performance.
+  Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the *dest_data_set* option will improve performance.

  | **required**: True
  | **type**: str

 exclude
-  Remote absolute path, glob, or list of paths, globs or data set name patterns for the file, files or data sets to exclude from src list and glob expansion.
+  Remote absolute path, glob, or list of paths, globs, data set name patterns or generation data sets (GDSs) in relative notation for the file, files or data sets to exclude from src list and glob expansion.

-  Patterns (wildcards) can contain one of the following, \`?\`, \`\*\`.
+  Patterns (wildcards) can contain one of the following, `?`, `*`.

-  \* matches everything.
+  * matches everything.

   ? matches any single character.

@@ -144,7 +146,7 @@ group
   When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership.

-  This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored.
+  This option is only applicable if ``dest`` is USS, otherwise ignored.

  | **required**: False
  | **type**: str

@@ -153,13 +155,13 @@ group

 mode
   The permission of the destination archive file.

-  If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored.
+  If ``dest`` is USS, this will act as Unix file mode, otherwise ignored.

-  It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ )or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results.
+  It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``) or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results.
   The mode may also be specified as a symbolic mode (for example, 'u+rwx' or 'u=rw,g=r,o=r') or a special string 'preserve'.

-  \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the src file.
+  *mode=preserve* means that the file will be given the same permissions as the src file.

  | **required**: False
  | **type**: str

@@ -170,14 +172,14 @@ owner
   When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership.

-  This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored.
+  This option is only applicable if ``dest`` is USS, otherwise ignored.

  | **required**: False
  | **type**: str

 remove
-  Remove any added source files , trees or data sets after module \ `zos\_archive <./zos_archive.html>`__\ adds them to the archive. Source files, trees and data sets are identified with option \ :emphasis:`src`\ .
+  Remove any added source files, trees or data sets after module `zos_archive <./zos_archive.html>`_ adds them to the archive. Source files, trees and data sets are identified with option *src*.

  | **required**: False
  | **type**: bool

 dest_data_set
-  Data set attributes to customize a \ :literal:`dest`\ data set to be archived into.
+  Data set attributes to customize a ``dest`` data set to be archived into.

  | **required**: False
  | **type**: dict

@@ -208,18 +210,18 @@ dest_data_set

 space_primary
-  If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set.
+  If the destination *dest* data set does not exist, this sets the primary space allocated for the data set.

-  The unit of space used is set using \ :emphasis:`space\_type`\ .
+  The unit of space used is set using *space_type*.

  | **required**: False
  | **type**: int

 space_secondary
-  If the destination \ :emphasis:`dest`\ data set does not exist , this sets the secondary space allocated for the data set.
+  If the destination *dest* data set does not exist, this sets the secondary space allocated for the data set.

-  The unit of space used is set using \ :emphasis:`space\_type`\ .
+  The unit of space used is set using *space_type*.

  | **required**: False
  | **type**: int

@@ -228,7 +230,7 @@ dest_data_set

 space_type
   If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space.

-  Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ .
+  Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``.

  | **required**: False
  | **type**: str

 record_format
-  If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`FB`\ )
+  If the destination data set does not exist, this sets the format of the data set (e.g. ``FB``).

   Choices are case-sensitive.

@@ -313,18 +315,18 @@ dest_data_set

 tmp_hlq
   Override the default high level qualifier (HLQ) for temporary data sets.

-  The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value \ :literal:`TMPHLQ`\ is used.
+  The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used.

  | **required**: False
  | **type**: str
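The dest description above notes that computing destination space from the sources can slow the module down, and that pre-setting space attributes in dest_data_set avoids it. A hedged sketch (data set names and sizes are hypothetical):

    - name: Archive a data set pattern with pre-sized destination attributes
      zos_archive:
        src: "USER.ARCHIVE.*"
        dest: "USER.ARCHIVE.RESULT.TRS"
        format:
          name: terse
          format_options:
            use_adrdssu: true
        dest_data_set:
          space_primary: 100
          space_type: m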
 force
-  If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ will be deleted. Otherwise it will be created with the \ :literal:`dest\_data\_set`\ attributes or default values if \ :literal:`dest\_data\_set`\ is not specified.
+  If set to ``true``, the remote file or data set ``dest`` will be deleted. Otherwise it will be created with the ``dest_data_set`` attributes or default values if ``dest_data_set`` is not specified.

-  If set to \ :literal:`false`\ , the file or data set will only be copied if the destination does not exist.
+  If set to ``false``, the file or data set will only be copied if the destination does not exist.

-  If set to \ :literal:`false`\ and destination exists, the module exits with a note to the user.
+  If set to ``false`` and destination exists, the module exits with a note to the user.

  | **required**: False
  | **type**: bool

@@ -348,7 +350,7 @@ Examples
       name: tar

 # Archive multiple files
-  - name: Compress list of files into a zip
+  - name: Archive list of files into a zip
     zos_archive:
       src:
         - /tmp/archive/foo.txt
@@ -358,7 +360,7 @@ Examples
       name: zip

 # Archive one data set into terse
-  - name: Compress data set into a terse
+  - name: Archive data set into a terse
     zos_archive:
       src: "USER.ARCHIVE.TEST"
       dest: "USER.ARCHIVE.RESULT.TRS"
@@ -366,7 +368,7 @@ Examples
       name: terse

 # Use terse with different options
-  - name: Compress data set into a terse, specify pack algorithm and use adrdssu
+  - name: Archive data set into a terse, specify pack algorithm and use adrdssu
     zos_archive:
       src: "USER.ARCHIVE.TEST"
       dest: "USER.ARCHIVE.RESULT.TRS"
@@ -377,7 +379,7 @@ Examples
       use_adrdssu: true

 # Use a pattern to store
-  - name: Compress data set pattern using xmit
+  - name: Archive data set pattern using xmit
     zos_archive:
       src: "USER.ARCHIVE.*"
       exclude_sources: "USER.ARCHIVE.EXCLUDE.*"
@@ -385,6 +387,27 @@ Examples
     format:
       name: xmit

+  - name: Archive multiple GDSs into a terse
+    zos_archive:
+      src:
+        - "USER.GDG(0)"
+        - "USER.GDG(-1)"
+        - "USER.GDG(-2)"
+      dest: "USER.ARCHIVE.RESULT.TRS"
+      format:
+        name: terse
+        format_options:
+          use_adrdssu: true
+
+  - name: Archive multiple data sets into a new GDS
+    zos_archive:
+      src: "USER.ARCHIVE.*"
+      dest: "USER.GDG(+1)"
+      format:
+        name: terse
+        format_options:
+          use_adrdssu: true
+

@@ -392,11 +415,11 @@ Notes
-----

.. note::
-   This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos\_fetch to retrieve to the controller and then zos\_copy or zos\_unarchive for copying to a remote or send to the remote and then unpack the archive respectively.
+   This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos_fetch to retrieve to the controller and then zos_copy or zos_unarchive for copying to a remote or send to the remote and then unpack the archive respectively.

-   When packing and using \ :literal:`use\_adrdssu`\ flag the module will take up to two times the space indicated in \ :literal:`dest\_data\_set`\ .
+   When packing and using the ``use_adrdssu`` flag the module will take up to two times the space indicated in ``dest_data_set``.

-   tar, zip, bz2 and pax are archived using python \ :literal:`tarfile`\ library which uses the latest version available for each format, for compatibility when opening from system make sure to use the latest available version for the intended format.
+   tar, zip, bz2 and pax are archived using the Python ``tarfile`` library, which uses the latest version available for each format; for compatibility when opening from the system, make sure to use the latest available version for the intended format.

@@ -416,27 +439,27 @@ Return Values

 state
-  The state of the input \ :literal:`src`\ .
+  The state of the input ``src``.

-  \ :literal:`absent`\ when the source files or data sets were removed.
+  ``absent`` when the source files or data sets were removed.

-  \ :literal:`present`\ when the source files or data sets were not removed.
+  ``present`` when the source files or data sets were not removed.

-  \ :literal:`incomplete`\ when \ :literal:`remove`\ was true and the source files or data sets were not removed.
+  ``incomplete`` when ``remove`` was true and the source files or data sets were not removed.

  | **returned**: always
  | **type**: str

 dest_state
-  The state of the \ :emphasis:`dest`\ file or data set.
+  The state of the *dest* file or data set.

-  \ :literal:`absent`\ when the file does not exist.
+  ``absent`` when the file does not exist.

-  \ :literal:`archive`\ when the file is an archive.
+  ``archive`` when the file is an archive.

-  \ :literal:`compress`\ when the file is compressed, but not an archive.
+  ``compress`` when the file is compressed, but not an archive.

-  \ :literal:`incomplete`\ when the file is an archive, but some files under \ :emphasis:`src`\ were not found.
+  ``incomplete`` when the file is an archive, but some files under *src* were not found.

  | **returned**: success
  | **type**: str

@@ -454,7 +477,7 @@ archived
  | **type**: list

 arcroot
-  If \ :literal:`src`\ is a list of USS files, this returns the top most parent folder of the list of files, otherwise is empty.
+  If ``src`` is a list of USS files, this returns the topmost parent folder of the list of files, otherwise it is empty.

  | **returned**: always
  | **type**: str

diff --git a/docs/source/modules/zos_blockinfile.rst b/docs/source/modules/zos_blockinfile.rst
index 8cd6f756c..fdd98d0f8 100644
--- a/docs/source/modules/zos_blockinfile.rst
+++ b/docs/source/modules/zos_blockinfile.rst
@@ -33,14 +33,16 @@ src
   The USS file must be an absolute pathname.

+  Generation data set (GDS) relative name of a generation already created, e.g. ``SOME.CREATION(-1)``.

  | **required**: True
  | **type**: str

 state
-  Whether the block should be inserted or replaced using \ :emphasis:`state=present`\ .
+  Whether the block should be inserted or replaced using *state=present*.

-  Whether the block should be removed using \ :emphasis:`state=absent`\ .
+  Whether the block should be removed using *state=absent*.

  | **required**: False
  | **type**: str

@@ -51,9 +53,9 @@ state

 marker
   The marker line template.

-  \ :literal:`{mark}`\ will be replaced with the values \ :literal:`in marker\_begin`\ (default="BEGIN") and \ :literal:`marker\_end`\ (default="END").
+  ``{mark}`` will be replaced with the values in ``marker_begin`` (default="BEGIN") and ``marker_end`` (default="END").

-  Using a custom marker without the \ :literal:`{mark}`\ variable may result in the block being repeatedly inserted on subsequent playbook runs.
+  Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs.

  | **required**: False
  | **type**: str

@@ -63,7 +65,7 @@ marker

 block
   The text to insert inside the marker lines.

-  Multi-line can be separated by '\\n'.
+  Multi-line can be separated by '\n'.

   Any double-quotation marks will be removed.
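A hedged sketch of the marker templating described above (file path and block content are hypothetical): ``{mark}`` is substituted with the ``marker_begin``/``marker_end`` values, so the task below brackets the block with "# OPEN ANSIBLE MANAGED BLOCK" and "# CLOSE ANSIBLE MANAGED BLOCK".

    - name: Insert a block with custom begin/end markers
      zos_blockinfile:
        src: /etc/profile
        block: "export ZOAU_HOME=/usr/lpp/zoau"
        marker: "# {mark} ANSIBLE MANAGED BLOCK"
        marker_begin: "OPEN"
        marker_end: "CLOSE"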
@@ -74,11 +76,11 @@ block

 insertafter
   If specified, the block will be inserted after the last match of the specified regular expression.

-  A special value \ :literal:`EOF`\ for inserting a block at the end of the file is available.
+  A special value ``EOF`` for inserting a block at the end of the file is available.

-  If a specified regular expression has no matches, \ :literal:`EOF`\ will be used instead.
+  If a specified regular expression has no matches, ``EOF`` will be used instead.

-  Choices are EOF or '\*regex\*'.
+  Choices are EOF or '*regex*'.

   Default is EOF.

@@ -89,18 +91,18 @@ insertafter

 insertbefore
   If specified, the block will be inserted before the last match of specified regular expression.

-  A special value \ :literal:`BOF`\ for inserting the block at the beginning of the file is available.
+  A special value ``BOF`` for inserting the block at the beginning of the file is available.

   If a specified regular expression has no matches, the block will be inserted at the end of the file.

-  Choices are BOF or '\*regex\*'.
+  Choices are BOF or '*regex*'.

  | **required**: False
  | **type**: str

 marker_begin
-  This will be inserted at \ :literal:`{mark}`\ in the opening ansible block marker.
+  This will be inserted at ``{mark}`` in the opening ansible block marker.

  | **required**: False
  | **type**: str

@@ -108,7 +110,7 @@ marker_begin

 marker_end
-  This will be inserted at \ :literal:`{mark}`\ in the closing ansible block marker.
+  This will be inserted at ``{mark}`` in the closing ansible block marker.

  | **required**: False
  | **type**: str

@@ -116,12 +118,14 @@ marker_end

 backup
-  Specifies whether a backup of destination should be created before editing the source \ :emphasis:`src`\ .
+  Specifies whether a backup of the destination should be created before editing the source *src*.

-  When set to \ :literal:`true`\ , the module creates a backup file or data set.
+  When set to ``true``, the module creates a backup file or data set.

   The backup file name will be returned on either success or failure of module execution such that data can be retrieved.

+  Use a generation data set (GDS) relative positive name, e.g. ``SOME.CREATION(+1)``.

  | **required**: False
  | **type**: bool
  | **default**: False

@@ -130,15 +134,15 @@ backup

 backup_name
   Specify the USS file name or data set name for the destination backup.

-  If the source \ :emphasis:`src`\ is a USS file or path, the backup\_name name must be a file or path name, and the USS file or path must be an absolute path name.
+  If the source *src* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name.

-  If the source is an MVS data set, the backup\_name name must be an MVS data set name, and the dataset must not be preallocated.
+  If the source is an MVS data set, the backup_name must be an MVS data set name, and the data set must not be preallocated.

-  If the backup\_name is not provided, the default backup\_name name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ .
+  If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``.
   If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it.

-  If \ :emphasis:`src`\ is a data set member and backup\_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name.
+  If *src* is a data set member and backup_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name.

  | **required**: False
  | **type**: str

@@ -147,14 +151,14 @@ backup_name

 tmp_hlq
   Override the default high level qualifier (HLQ) for temporary and backup datasets.

-  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used.
+  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used.

  | **required**: False
  | **type**: str

 encoding
-  The character set of the source \ :emphasis:`src`\ . \ `zos\_blockinfile <./zos_blockinfile.html>`__\ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047.
+  The character set of the source *src*. `zos_blockinfile <./zos_blockinfile.html>`_ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047.

   Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported.

@@ -168,7 +172,7 @@ force
   This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read.

-  The \ :literal:`force`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ .
+  The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*.

  | **required**: False
  | **type**: bool

@@ -281,6 +285,20 @@ Examples
       marker_end: "End Ansible Block Insertion 2"
       block: "{{ CONTENT }}"

+  - name: Add a block to a GDS
+    zos_blockinfile:
+      src: TEST.SOME.CREATION(0)
+      insertafter: EOF
+      block: "{{ CONTENT }}"
+
+  - name: Add a block to a data set and back up in a new generation of a GDS
+    zos_blockinfile:
+      src: SOME.CREATION.TEST
+      insertbefore: BOF
+      backup: true
+      backup_name: CREATION.GDS(+1)
+      block: "{{ CONTENT }}"
+

@@ -290,13 +308,13 @@ Notes
-----

.. note::
   It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user.

-   All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. The \ `zos\_data\_set <./zos_data_set.html>`__\ module can be used to catalog uncataloged data sets.
+   All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets.

-   For supported character sets used to encode data, refer to the \ `documentation `__\ .
+   For supported character sets used to encode data, refer to the `documentation `_.
-   When using \`\`with\_\*\`\` loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
+   When using ``with_*`` loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.

-   When more then one block should be handled in a file you must change the \ :emphasis:`marker`\ per task.
+   When more than one block should be handled in a file you must change the *marker* per task.

@@ -315,7 +333,7 @@ Return Values

 changed
-  Indicates if the source was modified. Value of 1 represents \`true\`, otherwise \`false\`.
+  Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`.

  | **returned**: success
  | **type**: bool

diff --git a/docs/source/modules/zos_copy.rst b/docs/source/modules/zos_copy.rst
index 69639e39a..8e8cb42bf 100644
--- a/docs/source/modules/zos_copy.rst
+++ b/docs/source/modules/zos_copy.rst
@@ -16,7 +16,7 @@ zos_copy -- Copy data to z/OS

 Synopsis
 --------
-- The \ `zos\_copy <./zos_copy.html>`__\ module copies a file or data set from a local or a remote machine to a location on the remote machine.
+- The `zos_copy <./zos_copy.html>`_ module copies a file or data set from a local or a remote machine to a location on the remote machine.

@@ -27,17 +27,17 @@ Parameters

 asa_text
-  If set to \ :literal:`true`\ , indicates that either \ :literal:`src`\ or \ :literal:`dest`\ or both contain ASA control characters.
+  If set to ``true``, indicates that either ``src`` or ``dest`` or both contain ASA control characters.

-  When \ :literal:`src`\ is a USS file and \ :literal:`dest`\ is a data set, the copy will preserve ASA control characters in the destination.
+  When ``src`` is a USS file and ``dest`` is a data set, the copy will preserve ASA control characters in the destination.

-  When \ :literal:`src`\ is a data set containing ASA control characters and \ :literal:`dest`\ is a USS file, the copy will put all control characters as plain text in the destination.
+  When ``src`` is a data set containing ASA control characters and ``dest`` is a USS file, the copy will put all control characters as plain text in the destination.

-  If \ :literal:`dest`\ is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA).
+  If ``dest`` is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA).

-  If neither \ :literal:`src`\ or \ :literal:`dest`\ have record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail.
+  If neither ``src`` nor ``dest`` has record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail.

-  This option is only valid for text files. If \ :literal:`is\_binary`\ is \ :literal:`true`\ or \ :literal:`executable`\ is \ :literal:`true`\ as well, the module will fail.
+  This option is only valid for text files. If ``is_binary`` is ``true`` or ``executable`` is ``true`` as well, the module will fail.

  | **required**: False
  | **type**: bool

@@ -47,7 +47,7 @@ asa_text

 backup
   Specifies whether a backup of the destination should be created before copying data.

-  When set to \ :literal:`true`\ , the module creates a backup file or data set.
+  When set to ``true``, the module creates a backup file or data set.

   The backup file name will be returned on either success or failure of module execution such that data can be retrieved.
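A hedged sketch of the asa_text behavior described above (paths and data set names are hypothetical): copying a USS text file into a data set with asa_text preserves the ASA control characters, and per the docs a nonexistent dest would be created with record format FBA.

    - name: Copy a report with ASA control characters into a data set
      zos_copy:
        src: /u/user/reports/output.txt
        dest: USER.REPORT.ASA
        remote_src: true
        asa_text: true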
@@ -59,26 +59,26 @@ backup

 backup_name
   Specify a unique USS file name or data set name for the destination backup.

-  If the destination \ :literal:`dest`\ is a USS file or path, the \ :literal:`backup\_name`\ must be an absolute path name.
+  If the destination ``dest`` is a USS file or path, the ``backup_name`` must be an absolute path name.

-  If the destination is an MVS data set name, the \ :literal:`backup\_name`\ provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods.
+  If the destination is an MVS data set name, the ``backup_name`` provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods.

-  If the \ :literal:`backup\_name`\ is not provided, the default \ :literal:`backup\_name`\ will be used. If the \ :literal:`dest`\ is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . If the \ :literal:`dest`\ is an MVS data set, it will be a data set with a randomly generated name.
+  If the ``backup_name`` is not provided, the default ``backup_name`` will be used. If the ``dest`` is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. If the ``dest`` is an MVS data set, it will be a data set with a randomly generated name.

-  If \ :literal:`dest`\ is a data set member and \ :literal:`backup\_name`\ is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name.
+  If ``dest`` is a data set member and ``backup_name`` is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name.

-  If \ :emphasis:`backup\_name`\ is a generation data set (GDS), it must be a relative positive name (for example, \ :literal:`HLQ.USER.GDG(+1)`\ ).
+  If *backup_name* is a generation data set (GDS), it must be a relative positive name (for example, ``HLQ.USER.GDG(+1)``).

  | **required**: False
  | **type**: str

 content
-  When used instead of \ :literal:`src`\ , sets the contents of a file or data set directly to the specified value.
+  When used instead of ``src``, sets the contents of a file or data set directly to the specified value.

-  Works only when \ :literal:`dest`\ is a USS file, sequential data set, or a partitioned data set member.
+  Works only when ``dest`` is a USS file, sequential data set, or a partitioned data set member.

-  If \ :literal:`dest`\ is a directory, then content will be copied to \ :literal:`/path/to/dest/inline\_copy`\ .
+  If ``dest`` is a directory, then content will be copied to ``/path/to/dest/inline_copy``.

  | **required**: False
  | **type**: str
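The backup_name option above accepts a GDS relative positive name, which lets each run roll its backup into a new generation. A hedged sketch (names are hypothetical; assumes the GDG base HLQ.USER.GDG already exists):

    - name: Copy a file over a sequential data set, backing up to a new generation
      zos_copy:
        src: /u/user/settings.conf
        dest: USER.SETTINGS.SEQ
        remote_src: true
        backup: true
        backup_name: HLQ.USER.GDG(+1)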
@@ -87,31 +87,33 @@ content

 dest
   The remote absolute path or data set where the content should be copied to.

-  \ :literal:`dest`\ can be a USS file, directory or MVS data set name.
+  ``dest`` can be a USS file, directory or MVS data set name.

-  If \ :literal:`dest`\ has missing parent directories, they will be created.
+  If ``dest`` has missing parent directories, they will be created.

-  If \ :literal:`dest`\ is a nonexistent USS file, it will be created.
+  If ``dest`` is a nonexistent USS file, it will be created.

-  If \ :literal:`dest`\ is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the module will fail.
+  If ``dest`` is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the module will fail.

-  If \ :literal:`dest`\ is a nonexistent data set, it will be created following the process outlined here and in the \ :literal:`volume`\ option.
+  If ``dest`` is a nonexistent data set, it will be created following the process outlined here and in the ``volume`` option.

-  If \ :literal:`dest`\ is a nonexistent data set, the attributes assigned will depend on the type of \ :literal:`src`\ . If \ :literal:`src`\ is a USS file, \ :literal:`dest`\ will have a Fixed Block (FB) record format and the remaining attributes will be computed. If \ :emphasis:`is\_binary=true`\ , \ :literal:`dest`\ will have a Fixed Block (FB) record format with a record length of 80, block size of 32760, and the remaining attributes will be computed. If \ :emphasis:`executable=true`\ ,\ :literal:`dest`\ will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed.
+  If ``dest`` is a nonexistent data set, the attributes assigned will depend on the type of ``src``. If ``src`` is a USS file, ``dest`` will have a Fixed Block (FB) record format and the remaining attributes will be computed. If *is_binary=true*, ``dest`` will have a Fixed Block (FB) record format with a record length of 80, block size of 32760, and the remaining attributes will be computed. If *executable=true*, ``dest`` will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed.

-  When \ :literal:`dest`\ is a data set, precedence rules apply. If \ :literal:`dest\_data\_set`\ is set, this will take precedence over an existing data set. If \ :literal:`dest`\ is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precendent rule has been exercised, \ :literal:`dest`\ will be created with the same attributes of \ :literal:`src`\ .
+  If ``src`` is a file and ``dest`` a partitioned data set, ``dest`` does not need to include a member in its value; the module can automatically compute the resulting member name from ``src``.

-  When the \ :literal:`dest`\ is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. The VSAM (KSDS) or VSAM (ESDS) \ :literal:`dest`\ will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option.
+  When ``dest`` is a data set, precedence rules apply. If ``dest_data_set`` is set, this will take precedence over an existing data set. If ``dest`` is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precedence rule has been exercised, ``dest`` will be created with the same attributes of ``src``.

-  When the \ :literal:`dest`\ is an existing VSAM (RRDS), then the source must be an RRDS.
   The VSAM (RRDS) will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option.
+  When the ``dest`` is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. The VSAM (KSDS) or VSAM (ESDS) ``dest`` will be deleted and recreated following the process outlined in the ``volume`` option.

-  When \ :literal:`dest`\ is and existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the \ :literal:`volume`\ option.
+  When the ``dest`` is an existing VSAM (RRDS), then the source must be an RRDS. The VSAM (RRDS) will be deleted and recreated following the process outlined in the ``volume`` option.

-  \ :literal:`dest`\ can be a previously allocated generation data set (GDS) or a new GDS.
+  When ``dest`` is an existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the ``volume`` option.

-  When \ :literal:`dest`\ is a generation data group (GDG), \ :literal:`src`\ must be a GDG too. The copy will allocate successive new generations in \ :literal:`dest`\ , the module will verify it has enough available generations before starting the copy operations.
+  ``dest`` can be a previously allocated generation data set (GDS) or a new GDS.

-  When \ :literal:`dest`\ is a data set, you can override storage management rules by specifying \ :literal:`volume`\ if the storage class being used has GUARANTEED\_SPACE=YES specified, otherwise, the allocation will fail. See \ :literal:`volume`\ for more volume related processes.
+  When ``dest`` is a generation data group (GDG), ``src`` must be a GDG too. The copy will allocate successive new generations in ``dest``; the module will verify it has enough available generations before starting the copy operations.
+
+  When ``dest`` is a data set, you can override storage management rules by specifying ``volume`` if the storage class being used has GUARANTEED_SPACE=YES specified, otherwise, the allocation will fail. See ``volume`` for more volume related processes.

  | **required**: True
  | **type**: str

@@ -120,9 +122,9 @@ dest

 encoding
   Specifies which encodings the destination file or data set should be converted from and to.

-  If \ :literal:`encoding`\ is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data.
+  If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data.

-  Only valid if \ :literal:`is\_binary`\ is false.
+  Only valid if ``is_binary`` is false.

  | **required**: False
  | **type**: dict

@@ -146,22 +148,22 @@ encoding

 tmp_hlq
   Override the default high level qualifier (HLQ) for temporary and backup datasets.

-  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used.
+  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used.

  | **required**: False
  | **type**: str
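The dest description above now notes that when src is a file and dest a partitioned data set, the member name can be computed automatically. A hedged sketch (paths and names are hypothetical; per the src docs, the member would be derived from the source file name, truncated or modified as needed to form a valid member name):

    - name: Copy a file into a PDSE without naming the destination member
      zos_copy:
        src: /u/user/backup.jcl
        dest: USER.JCL.PDSE
        remote_src: true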
 force
-  If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ is empty, the \ :literal:`dest`\ will be reused.
+  If set to ``true`` and the remote file or data set ``dest`` is empty, the ``dest`` will be reused.

-  If set to \ :literal:`true`\ and the remote file or data set \ :literal:`dest`\ is NOT empty, the \ :literal:`dest`\ will be deleted and recreated with the \ :literal:`src`\ data set attributes, otherwise it will be recreated with the \ :literal:`dest`\ data set attributes.
+  If set to ``true`` and the remote file or data set ``dest`` is NOT empty, the ``dest`` will be deleted and recreated with the ``src`` data set attributes, otherwise it will be recreated with the ``dest`` data set attributes.

-  To backup data before any deletion, see parameters \ :literal:`backup`\ and \ :literal:`backup\_name`\ .
+  To backup data before any deletion, see parameters ``backup`` and ``backup_name``.

-  If set to \ :literal:`false`\ , the file or data set will only be copied if the destination does not exist.
+  If set to ``false``, the file or data set will only be copied if the destination does not exist.

-  If set to \ :literal:`false`\ and destination exists, the module exits with a note to the user.
+  If set to ``false`` and destination exists, the module exits with a note to the user.

  | **required**: False
  | **type**: bool

@@ -169,11 +171,11 @@ force

 force_lock
-  By default, when \ :literal:`dest`\ is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use \ :literal:`force\_lock`\ to bypass this check and continue with copy.
+  By default, when ``dest`` is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use ``force_lock`` to bypass this check and continue with copy.

-  If set to \ :literal:`true`\ and destination is a MVS data set opened by another process then zos\_copy will try to copy using DISP=SHR.
+  If set to ``true`` and destination is a MVS data set opened by another process then zos_copy will try to copy using DISP=SHR.

-  Using \ :literal:`force\_lock`\ uses operations that are subject to race conditions and can lead to data loss, use with caution.
+  Using ``force_lock`` uses operations that are subject to race conditions and can lead to data loss; use with caution.

   If a data set member has aliases, and is not a program object, copying that member to a dataset that is in use will result in the aliases not being preserved in the target dataset. When this scenario occurs the module will fail.
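Given the race-condition warning on force_lock above, a hedged sketch of the narrow case it is meant for (names are hypothetical; the destination is assumed to be held by another process with DISP=SHR):

    - name: Copy into a data set currently allocated by a started task
      zos_copy:
        src: /u/user/new.parm
        dest: USER.BUSY.PARMLIB
        remote_src: true
        force_lock: true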
@@ -183,21 +185,21 @@ force_lock

 ignore_sftp_stderr
-  During data transfer through SFTP, the module fails if the SFTP command directs any content to stderr. The user is able to override this behavior by setting this parameter to \ :literal:`true`\ . By doing so, the module would essentially ignore the stderr stream produced by SFTP and continue execution.
+  During data transfer through SFTP, the SFTP command directs content to stderr. By default, the module essentially ignores the stderr stream produced by SFTP and continues execution. The user is able to override this behavior by setting this parameter to ``false``. By doing so, any content written to stderr is considered an error by Ansible and will cause the module to fail.

-  When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using \ :strong:`-vvvv`\ or through environment variables such as \ :strong:`verbosity = 4`\ , then this parameter will automatically be set to \ :literal:`true`\ .
+  When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``.

  | **required**: False
  | **type**: bool
-  | **default**: False
+  | **default**: True

 is_binary
-  If set to \ :literal:`true`\ , indicates that the file or data set to be copied is a binary file or data set.
+  If set to ``true``, indicates that the file or data set to be copied is a binary file or data set.

-  When \ :emphasis:`is\_binary=true`\ , no encoding conversion is applied to the content, all content transferred retains the original state.
+  When *is_binary=true*, no encoding conversion is applied to the content, all content transferred retains the original state.

-  Use \ :emphasis:`is\_binary=true`\ when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program.
+  Use *is_binary=true* when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program.

  | **required**: False
  | **type**: bool

@@ -205,15 +207,15 @@ is_binary

 executable
-  If set to \ :literal:`true`\ , indicates that the file or library to be copied is an executable.
+  If set to ``true``, indicates that the file or library to be copied is an executable.

-  If the \ :literal:`src`\ executable has an alias, the alias information is also copied. If the \ :literal:`dest`\ is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library.
+  If the ``src`` executable has an alias, the alias information is also copied. If the ``dest`` is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library.

-  If \ :emphasis:`executable=true`\ , and \ :literal:`dest`\ is a data set, it must be a PDS or PDSE (library).
+  If *executable=true*, and ``dest`` is a data set, it must be a PDS or PDSE (library).

-  If \ :literal:`dest`\ is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed.
+  If ``dest`` is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed.

-  If \ :literal:`dest`\ is a file, execute permission for the user will be added to the file (\`\`u+x\`\`).
+  If ``dest`` is a file, execute permission for the user will be added to the file (``u+x``).

  | **required**: False
  | **type**: bool

@@ -221,9 +223,9 @@ executable

 aliases
-  If set to \ :literal:`true`\ , indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation.
+  If set to ``true``, indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation.

-  Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when \ :literal:`executable=True`\ and \ :literal:`dest`\ is a USS file or directory, this option will be ignored.
+  Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when ``executable=True`` and ``dest`` is a USS file or directory, this option will be ignored.

   Copying of aliases for text-based data sets from USS sources or to USS destinations is not currently supported.

@@ -245,7 +247,7 @@ group
   When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership.
-  This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored.
+  This option is only applicable if ``dest`` is USS, otherwise ignored.

  | **required**: False
  | **type**: str

@@ -254,13 +256,13 @@ group

 mode
   The permission of the destination file or directory.

-  If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored.
+  If ``dest`` is USS, this will act as Unix file mode, otherwise ignored.

-  It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ )or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results.
+  It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``) or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results.

-  The mode may also be specified as a symbolic mode (for example, \`\`u+rwx\`\` or \`\`u=rw,g=r,o=r\`\`) or a special string \`preserve\`.
+  The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`.

-  \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the source file.
+  *mode=preserve* means that the file will be given the same permissions as the source file.

  | **required**: False
  | **type**: str

@@ -271,16 +273,16 @@ owner
   When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership.

-  This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored.
+  This option is only applicable if ``dest`` is USS, otherwise ignored.

  | **required**: False
  | **type**: str

 remote_src
-  If set to \ :literal:`false`\ , the module searches for \ :literal:`src`\ at the local machine.
+  If set to ``false``, the module searches for ``src`` at the local machine.

-  If set to \ :literal:`true`\ , the module goes to the remote/target machine for \ :literal:`src`\ .
+  If set to ``true``, the module goes to the remote/target machine for ``src``.

  | **required**: False
  | **type**: bool

@@ -290,27 +292,27 @@ remote_src

 src
   Path to a file/directory or name of a data set to copy to remote z/OS system.

-  If \ :literal:`remote\_src`\ is true, then \ :literal:`src`\ must be the path to a Unix System Services (USS) file, name of a data set, or data set member.
+  If ``remote_src`` is true, then ``src`` must be the path to a Unix System Services (USS) file, name of a data set, or data set member.

-  If \ :literal:`src`\ is a local path or a USS path, it can be absolute or relative.
+  If ``src`` is a local path or a USS path, it can be absolute or relative.

-  If \ :literal:`src`\ is a directory, \ :literal:`dest`\ must be a partitioned data set or a USS directory.
+  If ``src`` is a directory, ``dest`` must be a partitioned data set or a USS directory.

-  If \ :literal:`src`\ is a file and \ :literal:`dest`\ ends with "/" or is a directory, the file is copied to the directory with the same filename as \ :literal:`src`\ .
+ If ``src`` is a file and ``dest`` ends with "/" or is a directory, the file is copied to the directory with the same filename as ``src``. - If \ :literal:`src`\ is a directory and ends with "/", the contents of it will be copied into the root of \ :literal:`dest`\ . If it doesn't end with "/", the directory itself will be copied. + If ``src`` is a directory and ends with "/", the contents of it will be copied into the root of ``dest``. If it doesn't end with "/", the directory itself will be copied. - If \ :literal:`src`\ is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. + If ``src`` is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. - If \ :literal:`src`\ is a VSAM data set, \ :literal:`dest`\ must also be a VSAM. + If ``src`` is a VSAM data set, ``dest`` must also be a VSAM data set. - If \ :literal:`src`\ is a generation data set (GDS), it must be a previously allocated one. + If ``src`` is a generation data set (GDS), it must be a previously allocated one. - If \ :literal:`src`\ is a generation data group (GDG), \ :literal:`dest`\ can be another GDG or a USS directory. + If ``src`` is a generation data group (GDG), ``dest`` can be another GDG or a USS directory. Wildcards can be used to copy multiple PDS/PDSE members to another PDS/PDSE. - Required unless using \ :literal:`content`\ . + Required unless using ``content``. | **required**: False | **type**: str @@ -327,24 +329,24 @@ validate volume - If \ :literal:`dest`\ does not exist, specify which volume \ :literal:`dest`\ should be allocated to. + If ``dest`` does not exist, specify which volume ``dest`` should be allocated to. Only valid when the destination is an MVS data set. The volume must already be present on the device. - If no volume is specified, storage management rules will be used to determine the volume where \ :literal:`dest`\ will be allocated. + If no volume is specified, storage management rules will be used to determine the volume where ``dest`` will be allocated. - If the storage administrator has specified a system default unit name and you do not set a \ :literal:`volume`\ name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. + If the storage administrator has specified a system default unit name and you do not set a ``volume`` name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. | **required**: False | **type**: str dest_data_set - Data set attributes to customize a \ :literal:`dest`\ data set to be copied into. + Data set attributes to customize a ``dest`` data set to be copied into. - Some attributes only apply when \ :literal:`dest`\ is a generation data group (GDG). + Some attributes only apply when ``dest`` is a generation data group (GDG). | **required**: False | **type**: dict @@ -359,18 +361,18 @@ dest_data_set space_primary - If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set. + If the destination *dest* data set does not exist, this sets the primary space allocated for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ .
+ The unit of space used is set using *space_type*. | **required**: False | **type**: int space_secondary - If the destination \ :emphasis:`dest`\ data set does not exist , this sets the secondary space allocated for the data set. + If the destination *dest* data set does not exist, this sets the secondary space allocated for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ . + The unit of space used is set using *space_type*. | **required**: False | **type**: int @@ -379,7 +381,7 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . + Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. | **required**: False | **type**: str @@ -387,7 +389,7 @@ dest_data_set record_format - If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`fb`\ ) + If the destination data set does not exist, this sets the format of the data set. (e.g. ``fb``) Choices are case-sensitive. @@ -424,9 +426,9 @@ dest_data_set key_offset The key offset to use when creating a KSDS data set. - \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . + *key_offset* is required when *type=ksds*. - \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_offset* should only be provided when *type=ksds*. | **required**: False | **type**: int @@ -435,9 +437,9 @@ dest_data_set key_length The key length to use when creating a KSDS data set. - \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . + *key_length* is required when *type=ksds*. - \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_length* should only be provided when *type=ksds*. | **required**: False | **type**: int @@ -483,18 +485,18 @@ dest_data_set limit - Sets the \ :emphasis:`limit`\ attribute for a GDG. + Sets the *limit* attribute for a GDG. Specifies the maximum number, from 1 to 255 (up to 999 if extended), of generations that can be associated with the GDG being defined. - \ :emphasis:`limit`\ is required when \ :emphasis:`type=gdg`\ . + *limit* is required when *type=gdg*. | **required**: False | **type**: int empty - Sets the \ :emphasis:`empty`\ attribute for a GDG. + Sets the *empty* attribute for a GDG. If false, removes only the oldest GDS entry when a new GDS is created that causes the GDG limit to be exceeded. @@ -505,7 +507,7 @@ dest_data_set scratch - Sets the \ :emphasis:`scratch`\ attribute for a GDG. + Sets the *scratch* attribute for a GDG. Specifies what action is to be taken for a generation data set located on disk volumes when the data set is uncataloged from the GDG base as a result of EMPTY/NOEMPTY processing. @@ -514,16 +516,16 @@ dest_data_set purge - Sets the \ :emphasis:`purge`\ attribute for a GDG. + Sets the *purge* attribute for a GDG. - Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the \ :literal:`scratch`\ option is set. + Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. | **required**: False | **type**: bool extended - Sets the \ :emphasis:`extended`\ attribute for a GDG. + Sets the *extended* attribute for a GDG. If false, allows up to 255 generation data sets (GDSs) to be associated with the GDG.
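+ As a brief sketch of how the ``dest_data_set`` options above combine in a task (the data set names and attribute values here are illustrative, not prescriptive)::
+
+    - name: Copy a local file into a new sequential data set with explicit attributes
+      zos_copy:
+        src: /path/to/sample.txt
+        dest: USER.TEST.SEQ
+        dest_data_set:
+          type: seq
+          space_primary: 5
+          space_secondary: 3
+          space_type: m
+          record_format: fb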
@@ -534,7 +536,7 @@ dest_data_set fifo - Sets the \ :emphasis:`fifo`\ attribute for a GDG. + Sets the *fifo* attribute for a GDG. If false, the order is from the newest GDS defined to the oldest GDS. This is the default value. @@ -546,13 +548,13 @@ dest_data_set use_template - Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when \ :literal:`src`\ is a local file or directory. + Only valid when ``src`` is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_. | **required**: False | **type**: bool @@ -562,9 +564,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . + Jinja2 already sets defaults for the markers it uses; you can find more information in its `official documentation `_. - These options are ignored unless \ :literal:`use\_template`\ is true. + These options are ignored unless ``use_template`` is true. | **required**: False | **type**: dict @@ -643,7 +645,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block. - Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. | **required**: False | **type**: bool @@ -890,17 +892,17 @@ Notes .. note:: Destination data sets are assumed to be in catalog. When trying to copy to an uncataloged data set, the module assumes that the data set does not exist and will create it. - Destination will be backed up if either \ :literal:`backup`\ is \ :literal:`true`\ or \ :literal:`backup\_name`\ is provided. If \ :literal:`backup`\ is \ :literal:`false`\ but \ :literal:`backup\_name`\ is provided, task will fail. + Destination will be backed up if either ``backup`` is ``true`` or ``backup_name`` is provided. If ``backup`` is ``false`` but ``backup_name`` is provided, the task will fail. When copying local files or directories, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file or directory being copied. Temporary files will always be deleted, regardless of success or failure of the copy task.
VSAM data sets can only be copied to other VSAM data sets. - For supported character sets used to encode data, refer to the \ `documentation `__\ . + For supported character sets used to encode data, refer to the `documentation `_. This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z, thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if SFTP is not available, the module will fail. - Beginning in version 1.8.x, zos\_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option \ :literal:`executable`\ that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos\_copy.html) error. + Beginning in version 1.8.x, zos_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using the module option ``executable``, which signifies that an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos_copy.html) error. It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF FACILITY resource class. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. @@ -1011,7 +1013,7 @@ destination_attributes checksum - SHA256 checksum of the file after running zos\_copy. + SHA256 checksum of the file after running zos_copy. | **returned**: When ``validate=true`` and if ``dest`` is USS | **type**: str diff --git a/docs/source/modules/zos_data_set.rst b/docs/source/modules/zos_data_set.rst index caed66ba9..7a56cfe84 100644 --- a/docs/source/modules/zos_data_set.rst +++ b/docs/source/modules/zos_data_set.rst @@ -28,11 +28,11 @@ Parameters name - The name of the data set being managed. (e.g \ :literal:`USER.TEST`\ ) + The name of the data set being managed. (e.g. ``USER.TEST``) - If \ :emphasis:`name`\ is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. + If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module runner's username. - Required if \ :emphasis:`type=member`\ or \ :emphasis:`state!=present`\ and not using \ :emphasis:`batch`\ . + Required if *type=member* or *state!=present* and not using *batch*. | **required**: False | **type**: str @@ -41,52 +41,52 @@ name state The final state desired for specified data set. - If \ :emphasis:`state=absent`\ and the data set does not exist on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=absent* and the data set does not exist on the managed node, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=absent`\ and the data set does exist on the managed node, remove the data set, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=absent* and the data set does exist on the managed node, the data set is removed; the module completes successfully with *changed=True*.
- If \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ and \ :emphasis:`force=True`\ , the data set will be opened with \ :emphasis:`DISP=SHR`\ such that the entire data set can be accessed by other processes while the specified member is deleted. + If *state=absent* and *type=member* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. - If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with \ :emphasis:`changed=True`\ . + If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, then the data set is removed. The module completes successfully with *changed=True*. - If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . + If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, then no action is taken. The module completes successfully with *changed=False*. - If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided \ :emphasis:`volumes`\ . If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . + If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set to be deleted is cataloged. The module will catalog the original data set on completion; if the attempts to catalog fail, no action is taken. The module completes successfully with *changed=False*. - If \ :emphasis:`state=absent`\ and \ :emphasis:`type=gdg`\ and the GDG base has active generations the module will complete successfully with \ :emphasis:`changed=False`\ . To remove it option \ :emphasis:`force`\ needs to be used. If the GDG base does not have active generations the module will complete successfully with \ :emphasis:`changed=True`\ . + If *state=absent* and *type=gdg* and the GDG base has active generations, the module will complete successfully with *changed=False*. To remove it, the *force* option needs to be used. If the GDG base does not have active generations, the module will complete successfully with *changed=True*.
- If \ :emphasis:`state=present`\ and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=present* and the data set does not exist on the managed node, the data set is created and cataloged; the module completes successfully with *changed=True*. - If \ :emphasis:`state=present`\ and \ :emphasis:`replace=True`\ and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=present* and *replace=True* and the data set is present on the managed node, the existing data set is deleted, and a new data set is created and cataloged with the desired attributes; the module completes successfully with *changed=True*. - If \ :emphasis:`state=present`\ and \ :emphasis:`replace=False`\ and the data set is present on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=present* and *replace=False* and the data set is present on the managed node, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=present`\ and \ :emphasis:`type=member`\ and the member does not exist in the data set, create a member formatted to store data, module completes successfully with \ :emphasis:`changed=True`\ . Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. + If *state=present* and *type=member* and the member does not exist in the data set, a member formatted to store data is created; the module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there are executables (program objects) and data. The member created is formatted to store data, not an executable. - If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is already cataloged, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, the module completes successfully with *changed=True*. - If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, returns failure with \ :emphasis:`changed=False`\ . + If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, the module returns failure with *changed=False*.
- If \ :emphasis:`state=uncataloged`\ and the data set is not found, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=uncataloged* and the data set is not found, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=uncataloged`\ and the data set is found, the data set is uncataloged, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=uncataloged* and the data set is found, the data set is uncataloged; the module completes successfully with *changed=True*. | **required**: False @@ -96,9 +96,9 @@ state type - The data set type to be used when creating a data set. (e.g \ :literal:`pdse`\ ). + The data set type to be used when creating a data set. (e.g. ``pdse``). - \ :literal:`member`\ expects to be used with an existing partitioned data set. + ``member`` expects to be used with an existing partitioned data set. Choices are case-sensitive. @@ -111,7 +111,7 @@ type space_primary The amount of primary space to allocate for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ . + The unit of space used is set using *space_type*. | **required**: False | **type**: int @@ -121,7 +121,7 @@ space_primary space_secondary The amount of secondary space to allocate for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ . + The unit of space used is set using *space_type*. | **required**: False | **type**: int @@ -131,7 +131,7 @@ space_secondary space_type The unit of measurement to use when defining primary and secondary space. - Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . + Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. | **required**: False | **type**: str @@ -140,11 +140,11 @@ space_type record_format - The format of the data set. (e.g \ :literal:`FB`\ ) + The format of the data set. (e.g. ``FB``) Choices are case-sensitive. - When \ :emphasis:`type=ksds`\ , \ :emphasis:`type=esds`\ , \ :emphasis:`type=rrds`\ , \ :emphasis:`type=lds`\ or \ :emphasis:`type=zfs`\ then \ :emphasis:`record\_format=None`\ , these types do not have a default \ :emphasis:`record\_format`\ . + When *type=ksds*, *type=esds*, *type=rrds*, *type=lds* or *type=zfs*, then *record_format=None*; these types do not have a default *record_format*. | **required**: False | **type**: str @@ -219,9 +219,9 @@ directory_blocks key_offset The key offset to use when creating a KSDS data set. - \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . + *key_offset* is required when *type=ksds*. - \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_offset* should only be provided when *type=ksds*. | **required**: False | **type**: int @@ -230,16 +230,16 @@ key_offset key_length The key length to use when creating a KSDS data set. - \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . + *key_length* is required when *type=ksds*. - \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_length* should only be provided when *type=ksds*. | **required**: False | **type**: int empty - Sets the \ :emphasis:`empty`\ attribute for Generation Data Groups. + Sets the *empty* attribute for Generation Data Groups. If false, removes only the oldest GDS entry when a new GDS is created that causes the GDG limit to be exceeded.
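+ For instance, the KSDS-related options above combine as in the following sketch (the data set name, sizes, and key attributes are illustrative)::
+
+    - name: Create a KSDS, providing the required key attributes
+      zos_data_set:
+        name: USER.TEST.KSDS
+        type: ksds
+        state: present
+        space_primary: 5
+        space_type: m
+        key_length: 8
+        key_offset: 0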
@@ -252,7 +252,7 @@ empty extended - Sets the \ :emphasis:`extended`\ attribute for Generation Data Groups. + Sets the *extended* attribute for Generation Data Groups. If false, allows up to 255 generation data sets (GDSs) to be associated with the GDG. @@ -265,7 +265,7 @@ extended fifo - Sets the \ :emphasis:`fifo`\ attribute for Generation Data Groups. + Sets the *fifo* attribute for Generation Data Groups. If false, the order is from the newest GDS defined to the oldest GDS. This is the default value. @@ -278,27 +278,27 @@ fifo limit - Sets the \ :emphasis:`limit`\ attribute for Generation Data Groups. + Sets the *limit* attribute for Generation Data Groups. Specifies the maximum number, from 1 to 255 (up to 999 if extended), of GDSs that can be associated with the GDG being defined. - \ :emphasis:`limit`\ is required when \ :emphasis:`type=gdg`\ . + *limit* is required when *type=gdg*. | **required**: False | **type**: int purge - Sets the \ :emphasis:`purge`\ attribute for Generation Data Groups. + Sets the *purge* attribute for Generation Data Groups. - Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the \ :literal:`scratch`\ option is set. + Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. | **required**: False | **type**: bool scratch - Sets the \ :emphasis:`scratch`\ attribute for Generation Data Groups. + Sets the *scratch* attribute for Generation Data Groups. Specifies what action is to be taken for a generation data set located on disk volumes when the data set is uncataloged from the GDG base as a result of EMPTY/NOEMPTY processing. @@ -307,19 +307,19 @@ scratch volumes - If cataloging a data set, \ :emphasis:`volumes`\ specifies the name of the volume(s) where the data set is located. + If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. - If creating a data set, \ :emphasis:`volumes`\ specifies the volume(s) where the data set should be created. + If creating a data set, *volumes* specifies the volume(s) where the data set should be created. - If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=present`\ , and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. + If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. - If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=absent`\ and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. + If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - \ :emphasis:`volumes`\ is required when \ :emphasis:`state=cataloged`\ . + *volumes* is required when *state=cataloged*. Accepts a string when using a single volume and a list of strings when using multiple.
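+ To illustrate the GDG attributes above, a task along the following lines (values are illustrative) would define a GDG base::
+
+    - name: Create a GDG base that keeps up to 30 generations
+      zos_data_set:
+        name: USER.TEST.GDG
+        type: gdg
+        state: present
+        limit: 30
+        scratch: true
+        purge: true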
@@ -328,12 +328,12 @@ volumes replace - When \ :emphasis:`replace=True`\ , and \ :emphasis:`state=present`\ , existing data set matching \ :emphasis:`name`\ will be replaced. + When *replace=True* and *state=present*, an existing data set matching *name* will be replaced. Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - If \ :emphasis:`replace=True`\ , all data in the original data set will be lost. + If *replace=True*, all data in the original data set will be lost. | **required**: False | **type**: bool @@ -343,7 +343,7 @@ replace tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + The default HLQ is the Ansible user used to execute the module, and if that is not available, then the value ``TMPHLQ`` is used. | **required**: False | **type**: str @@ -354,11 +354,11 @@ force This is helpful when a data set is being used in a long-running process, such as a started task, and you want to delete a member. - The \ :emphasis:`force=True`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . + The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. - The \ :emphasis:`force=True`\ only applies to data set members when \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ and when removing a GDG base with active generations. + The *force=True* option only applies to data set members when *state=absent* and *type=member*, and when removing a GDG base with active generations. - If \ :emphasis:`force=True`\ , \ :emphasis:`type=gdg`\ and \ :emphasis:`state=absent`\ it will force remove a GDG base with active generations. + If *force=True*, *type=gdg* and *state=absent*, the module will force remove a GDG base with active generations. | **required**: False | **type**: bool @@ -374,11 +374,11 @@ batch name - The name of the data set being managed. (e.g \ :literal:`USER.TEST`\ ) + The name of the data set being managed. (e.g. ``USER.TEST``) - If \ :emphasis:`name`\ is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. + If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module runner's username. - Required if \ :emphasis:`type=member`\ or \ :emphasis:`state!=present`\ + Required if *type=member* or *state!=present*. | **required**: False | **type**: str @@ -387,49 +387,49 @@ batch state The final state desired for specified data set. - If \ :emphasis:`state=absent`\ and the data set does not exist on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=absent* and the data set does not exist on the managed node, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=absent`\ and the data set does exist on the managed node, remove the data set, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=absent* and the data set does exist on the managed node, the data set is removed; the module completes successfully with *changed=True*.
- If \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ and \ :emphasis:`force=True`\ , the data set will be opened with \ :emphasis:`DISP=SHR`\ such that the entire data set can be accessed by other processes while the specified member is deleted. + If *state=absent* and *type=member* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. - If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with \ :emphasis:`changed=True`\ . + If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, then the data set is removed. The module completes successfully with *changed=True*. - If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . + If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, then no action is taken. The module completes successfully with *changed=False*. - If \ :emphasis:`state=absent`\ and \ :emphasis:`volumes`\ is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided \ :emphasis:`volumes`\ . If they volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with \ :emphasis:`changed=False`\ . + If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set to be deleted is cataloged. The module will catalog the original data set on completion; if the attempts to catalog fail, no action is taken. The module completes successfully with *changed=False*. - If \ :emphasis:`state=present`\ and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=present* and the data set does not exist on the managed node, the data set is created and cataloged; the module completes successfully with *changed=True*. - If \ :emphasis:`state=present`\ and \ :emphasis:`replace=True`\ and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with \ :emphasis:`changed=True`\ .
+ If *state=present* and *replace=True* and the data set is present on the managed node, the existing data set is deleted, and a new data set is created and cataloged with the desired attributes; the module completes successfully with *changed=True*. - If \ :emphasis:`state=present`\ and \ :emphasis:`replace=False`\ and the data set is present on the managed node, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=present* and *replace=False* and the data set is present on the managed node, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=present`\ and \ :emphasis:`type=member`\ and the member does not exist in the data set, create a member formatted to store data, module completes successfully with \ :emphasis:`changed=True`\ . Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. + If *state=present* and *type=member* and the member does not exist in the data set, a member formatted to store data is created; the module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there are executables (program objects) and data. The member created is formatted to store data, not an executable. - If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is already cataloged, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog is successful, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, the module completes successfully with *changed=True*. - If \ :emphasis:`state=cataloged`\ and \ :emphasis:`volumes`\ is provided and the data set is not cataloged, module attempts to perform catalog using supplied \ :emphasis:`name`\ and \ :emphasis:`volumes`\ . If the attempt to catalog the data set catalog fails, returns failure with \ :emphasis:`changed=False`\ . + If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, the module returns failure with *changed=False*. - If \ :emphasis:`state=uncataloged`\ and the data set is not found, no action taken, module completes successfully with \ :emphasis:`changed=False`\ . + If *state=uncataloged* and the data set is not found, no action is taken; the module completes successfully with *changed=False*. - If \ :emphasis:`state=uncataloged`\ and the data set is found, the data set is uncataloged, module completes successfully with \ :emphasis:`changed=True`\ . + If *state=uncataloged* and the data set is found, the data set is uncataloged; the module completes successfully with *changed=True*. | **required**: False @@ -439,9 +439,9 @@ batch type - The data set type to be used when creating a data set.
(e.g \ :literal:`pdse`\ ) + The data set type to be used when creating a data set. (e.g. ``pdse``) - \ :literal:`member`\ expects to be used with an existing partitioned data set. + ``member`` expects to be used with an existing partitioned data set. Choices are case-sensitive. @@ -454,7 +454,7 @@ batch space_primary The amount of primary space to allocate for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ . + The unit of space used is set using *space_type*. | **required**: False | **type**: int @@ -464,7 +464,7 @@ batch space_secondary The amount of secondary space to allocate for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ . + The unit of space used is set using *space_type*. | **required**: False | **type**: int @@ -474,7 +474,7 @@ batch space_type The unit of measurement to use when defining primary and secondary space. - Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . + Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. | **required**: False | **type**: str @@ -483,11 +483,11 @@ batch record_format - The format of the data set. (e.g \ :literal:`FB`\ ) + The format of the data set. (e.g. ``FB``) Choices are case-sensitive. - When \ :emphasis:`type=ksds`\ , \ :emphasis:`type=esds`\ , \ :emphasis:`type=rrds`\ , \ :emphasis:`type=lds`\ or \ :emphasis:`type=zfs`\ then \ :emphasis:`record\_format=None`\ , these types do not have a default \ :emphasis:`record\_format`\ . + When *type=ksds*, *type=esds*, *type=rrds*, *type=lds* or *type=zfs*, then *record_format=None*; these types do not have a default *record_format*. | **required**: False | **type**: str @@ -562,9 +562,9 @@ batch key_offset The key offset to use when creating a KSDS data set. - \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . + *key_offset* is required when *type=ksds*. - \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_offset* should only be provided when *type=ksds*. | **required**: False | **type**: int @@ -573,16 +573,16 @@ batch key_length The key length to use when creating a KSDS data set. - \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . + *key_length* is required when *type=ksds*. - \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_length* should only be provided when *type=ksds*. | **required**: False | **type**: int empty - Sets the \ :emphasis:`empty`\ attribute for Generation Data Groups. + Sets the *empty* attribute for Generation Data Groups. If false, removes only the oldest GDS entry when a new GDS is created that causes the GDG limit to be exceeded. @@ -595,7 +595,7 @@ batch extended - Sets the \ :emphasis:`extended`\ attribute for Generation Data Groups. + Sets the *extended* attribute for Generation Data Groups. If false, allows up to 255 generation data sets (GDSs) to be associated with the GDG. @@ -608,7 +608,7 @@ batch fifo - Sets the \ :emphasis:`fifo`\ attribute for Generation Data Groups. + Sets the *fifo* attribute for Generation Data Groups. If false, the order is from the newest GDS defined to the oldest GDS. This is the default value. @@ -621,27 +621,27 @@ batch limit - Sets the \ :emphasis:`limit`\ attribute for Generation Data Groups. + Sets the *limit* attribute for Generation Data Groups. Specifies the maximum number, from 1 to 255 (up to 999 if extended), of GDSs that can be associated with the GDG being defined.
- \ :emphasis:`limit`\ is required when \ :emphasis:`type=gdg`\ . + *limit* is required when *type=gdg*. | **required**: False | **type**: int purge - Sets the \ :emphasis:`purge`\ attribute for Generation Data Groups. + Sets the *purge* attribute for Generation Data Groups. - Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the \ :literal:`scratch`\ option is set. + Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. | **required**: False | **type**: bool scratch - Sets the \ :emphasis:`scratch`\ attribute for Generation Data Groups. + Sets the *scratch* attribute for Generation Data Groups. Specifies what action is to be taken for a generation data set located on disk volumes when the data set is uncataloged from the GDG base as a result of EMPTY/NOEMPTY processing. @@ -650,19 +650,19 @@ batch volumes - If cataloging a data set, \ :emphasis:`volumes`\ specifies the name of the volume(s) where the data set is located. + If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. - If creating a data set, \ :emphasis:`volumes`\ specifies the volume(s) where the data set should be created. + If creating a data set, *volumes* specifies the volume(s) where the data set should be created. - If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=present`\ , and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. + If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. - If \ :emphasis:`volumes`\ is provided when \ :emphasis:`state=absent`\ and the data set is not found in the catalog, \ `zos\_data\_set <./zos_data_set.html>`__\ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. + If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - \ :emphasis:`volumes`\ is required when \ :emphasis:`state=cataloged`\ . + *volumes* is required when *state=cataloged*. Accepts a string when using a single volume and a list of strings when using multiple. @@ -671,12 +671,12 @@ batch replace - When \ :emphasis:`replace=True`\ , and \ :emphasis:`state=present`\ , existing data set matching \ :emphasis:`name`\ will be replaced. + When *replace=True* and *state=present*, an existing data set matching *name* will be replaced. Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - If \ :emphasis:`replace=True`\ , all data in the original data set will be lost. + If *replace=True*, all data in the original data set will be lost.
| **required**: False | **type**: bool @@ -688,9 +688,9 @@ batch force This is helpful when a data set is being used in a long-running process, such as a started task, and you want to delete a member. - The \ :emphasis:`force=True`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . + The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. - The \ :emphasis:`force=True`\ only applies to data set members when \ :emphasis:`state=absent`\ and \ :emphasis:`type=member`\ . + The *force=True* option only applies to data set members when *state=absent* and *type=member*. | **required**: False | **type**: bool diff --git a/docs/source/modules/zos_encode.rst b/docs/source/modules/zos_encode.rst index 51bcca12d..860a150bf 100644 --- a/docs/source/modules/zos_encode.rst +++ b/docs/source/modules/zos_encode.rst @@ -37,7 +37,7 @@ encoding from - The character set of the source \ :emphasis:`src`\ . + The character set of the source *src*. | **required**: False | **type**: str @@ -45,7 +45,7 @@ encoding to - The destination \ :emphasis:`dest`\ character set for the output to be written as. + The destination *dest* character set for the output to be written as. | **required**: False | **type**: str @@ -58,7 +58,7 @@ src The USS path or file must be an absolute pathname. - If \ :emphasis:`src`\ is a USS directory, all files will be encoded. + If *src* is a USS directory, all files will be encoded. Encoding a whole generation data group (GDG) is not supported. @@ -69,24 +69,24 @@ src dest The location where the converted characters are output. - The destination \ :emphasis:`dest`\ can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, a generation data set (GDS) or KSDS (VSAM data set). + The destination *dest* can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, a generation data set (GDS) or KSDS (VSAM data set). - If the length of the PDSE member name used in \ :emphasis:`dest`\ is greater than 8 characters, the member name will be truncated when written out. + If the length of the PDSE member name used in *dest* is greater than 8 characters, the member name will be truncated when written out. - If \ :emphasis:`dest`\ is not specified, the \ :emphasis:`src`\ will be used as the destination and will overwrite the \ :emphasis:`src`\ with the character set in the option \ :emphasis:`to\_encoding`\ . + If *dest* is not specified, the *src* will be used as the destination and will overwrite the *src* with the character set in the option *to_encoding*. The USS file or path must be an absolute pathname. - If \ :emphasis:`dest`\ is a data set, it must be already allocated. + If *dest* is a data set, it must already be allocated. | **required**: False | **type**: str backup - Creates a backup file or backup data set for \ :emphasis:`dest`\ , including the timestamp information to ensure that you retrieve the original file. + Creates a backup file or backup data set for *dest*, including the timestamp information to ensure that you retrieve the original file. - \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . + *backup_name* can be used to specify a backup file name if *backup=true*. | **required**: False | **type**: bool @@ -96,15 +96,15 @@ backup backup_name Specify the USS file name or data set name for the dest backup.
- If dest is a USS file or path, \ :emphasis:`backup\_name`\ must be a file or path name, and the USS path or file must be an absolute pathname. + If dest is a USS file or path, *backup_name* must be a file or path name, and the USS path or file must be an absolute pathname. - If dest is an MVS data set, the \ :emphasis:`backup\_name`\ must be an MVS data set name. + If dest is an MVS data set, the *backup_name* must be an MVS data set name. - If \ :emphasis:`backup\_name`\ is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file\_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. + If *backup_name* is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. - \ :literal:`backup\_name`\ will be returned on either success or failure of module execution such that data can be retrieved. + ``backup_name`` will be returned on either success or failure of module execution such that data can be retrieved. - If \ :emphasis:`backup\_name`\ is a generation data set (GDS), it must be a relative positive name (for example, \ :literal:`HLQ.USER.GDG(+1)`\ ). + If *backup_name* is a generation data set (GDS), it must be a relative positive name (for example, ``HLQ.USER.GDG(+1)``). | **required**: False | **type**: str @@ -113,7 +113,7 @@ backup_name backup_compress Determines if backups to USS files or paths should be compressed. - \ :emphasis:`backup\_compress`\ is only used when \ :emphasis:`backup=true`\ . + *backup_compress* is only used when *backup=true*. | **required**: False | **type**: bool @@ -123,7 +123,7 @@ backup_compress tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + The default HLQ is the Ansible user used to execute the module, and if that is not available, then the value ``TMPHLQ`` is used. | **required**: False | **type**: str @@ -288,7 +288,7 @@ Notes All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. - For supported character sets used to encode data, refer to the \ `documentation `__\ . + For supported character sets used to encode data, refer to the `documentation `_. @@ -301,7 +301,7 @@ Return Values src - The location of the input characters identified in option \ :emphasis:`src`\ . + The location of the input characters identified in option *src*. | **returned**: always | **type**: str diff --git a/docs/source/modules/zos_fetch.rst b/docs/source/modules/zos_fetch.rst index 23d58c864..e3f0df325 100644 --- a/docs/source/modules/zos_fetch.rst +++ b/docs/source/modules/zos_fetch.rst @@ -20,7 +20,7 @@ Synopsis - When fetching a sequential data set, the destination file name will be the same as the data set name. - When fetching a PDS or PDSE, the destination will be a directory with the same name as the PDS or PDSE. - When fetching a PDS/PDSE member, destination will be a file.
-- Files that already exist at \ :literal:`dest`\ will be overwritten if they are different than \ :literal:`src`\ . +- Files that already exist at ``dest`` will be overwritten if they are different from ``src``. - When fetching a GDS, the relative name will be resolved to its absolute one. - When fetching a generation data group, the destination will be a directory with the same name as the GDG. @@ -98,7 +98,7 @@ encoding from - The character set of the source \ :emphasis:`src`\ . + The character set of the source *src*. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -107,7 +107,7 @@ encoding to - The destination \ :emphasis:`dest`\ character set for the output to be written as. + The destination *dest* character set for the output to be written as. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -119,20 +119,20 @@ encoding tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + The default HLQ is the Ansible user used to execute the module, and if that is not available, then the value ``TMPHLQ`` is used. | **required**: False | **type**: str ignore_sftp_stderr - During data transfer through sftp, the module fails if the sftp command directs any content to stderr. The user is able to override this behavior by setting this parameter to \ :literal:`true`\ . By doing so, the module would essentially ignore the stderr stream produced by sftp and continue execution. + During data transfer through SFTP, the SFTP command directs content to stderr. By default, the module essentially ignores the stderr stream produced by SFTP and continues execution. The user is able to override this behavior by setting this parameter to ``false``. By doing so, any content written to stderr is considered an error by Ansible and will cause the module to fail. - When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using \ :strong:`-vvvv`\ or through environment variables such as \ :strong:`verbosity = 4`\ , then this parameter will automatically be set to \ :literal:`true`\ . + When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. | **required**: False | **type**: bool - | **default**: False + | **default**: True @@ -216,13 +216,13 @@ Notes .. note:: When fetching PDSE and VSAM data sets, temporary storage will be used on the remote z/OS system. After the PDSE or VSAM data set is successfully transferred, the temporary storage will be deleted. The size of the temporary storage will correspond to the size of the PDSE or VSAM data set being fetched. If module execution fails, the temporary storage will be deleted. - To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the \ :literal:`checksum`\ parameter. + To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the ``checksum`` parameter.
All data sets are always assumed to be cataloged. If an uncataloged data set needs to be fetched, it should be cataloged first. Fetching HFS or ZFS type data sets is currently not supported. - For supported character sets used to encode data, refer to the \ `documentation `__\ . + For supported character sets used to encode data, refer to the `documentation `_. This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z, thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if SFTP is not available, the module will fail. @@ -283,7 +283,7 @@ data_set_type | **sample**: PDSE note - Notice of module failure when \ :literal:`fail\_on\_missing`\ is false. + Notice of module failure when ``fail_on_missing`` is false. | **returned**: failure and fail_on_missing=false | **type**: str diff --git a/docs/source/modules/zos_find.rst b/docs/source/modules/zos_find.rst index 83082b5c0..5c23a28a7 100644 --- a/docs/source/modules/zos_find.rst +++ b/docs/source/modules/zos_find.rst @@ -18,7 +18,7 @@ Synopsis -------- - Return a list of data sets based on specific criteria. - Multiple criteria can be added (AND'd) together. -- The \ :literal:`zos\_find`\ module can only find MVS data sets. Use the \ `find `__\ module to find USS files. +- The ``zos_find`` module can only find MVS data sets. Use the `find `_ module to find USS files. @@ -44,9 +44,9 @@ age age_stamp Choose the age property against which to compare age. - \ :literal:`creation\_date`\ is the date the data set was created and \ :literal:`ref\_date`\ is the date the data set was last referenced. + ``creation_date`` is the date the data set was created and ``ref_date`` is the date the data set was last referenced. - \ :literal:`ref\_date`\ is only applicable to sequential and partitioned data sets. + ``ref_date`` is only applicable to sequential and partitioned data sets. | **required**: False | **type**: str @@ -80,7 +80,7 @@ patterns This parameter expects a list, which can be either comma separated or YAML. - If \ :literal:`pds\_patterns`\ is provided, \ :literal:`patterns`\ must be member patterns. + If ``pds_patterns`` is provided, ``patterns`` must be member patterns. When searching for members within a PDS/PDSE, pattern can be a regular expression. @@ -107,7 +107,7 @@ pds_patterns Required when searching for data set members. - Valid only for \ :literal:`nonvsam`\ resource types. Otherwise ignored. + Valid only for ``nonvsam`` resource types. Otherwise ignored. | **required**: False | **type**: list @@ -117,14 +117,16 @@ pds_patterns resource_type The type of resource to search. - \ :literal:`nonvsam`\ refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF. + ``nonvsam`` refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF. - \ :literal:`cluster`\ refers to a VSAM cluster. The \ :literal:`data`\ and \ :literal:`index`\ are the data and index components of a VSAM cluster. + ``cluster`` refers to a VSAM cluster. The ``data`` and ``index`` are the data and index components of a VSAM cluster. + + ``gdg`` refers to Generation Data Groups. The module searches based on the GDG base name.
| **required**: False | **type**: str | **default**: nonvsam - | **choices**: nonvsam, cluster, data, index + | **choices**: nonvsam, cluster, data, index, gdg volume @@ -135,6 +137,60 @@ volume | **elements**: str +empty + A GDG attribute, only valid when ``resource_type=gdg``. + + If provided, the module will search for data sets whose *empty* attribute is set as provided. + + | **required**: False + | **type**: bool + + +extended + A GDG attribute, only valid when ``resource_type=gdg``. + + If provided, the module will search for data sets whose *extended* attribute is set as provided. + + | **required**: False + | **type**: bool + + +fifo + A GDG attribute, only valid when ``resource_type=gdg``. + + If provided, the module will search for data sets whose *fifo* attribute is set as provided. + + | **required**: False + | **type**: bool + + +limit + A GDG attribute, only valid when ``resource_type=gdg``. + + If provided, the module will search for data sets whose *limit* attribute is set as provided. + + | **required**: False + | **type**: int + + +purge + A GDG attribute, only valid when ``resource_type=gdg``. + + If provided, the module will search for data sets whose *purge* attribute is set as provided. + + | **required**: False + | **type**: bool + + +scratch + A GDG attribute, only valid when ``resource_type=gdg``. + + If provided, the module will search for data sets whose *scratch* attribute is set as provided. + + | **required**: False + | **type**: bool + + Examples @@ -185,6 +241,15 @@ Examples - USER.* resource_type: cluster + - name: Find all Generation Data Groups starting with the word 'USER' and specific GDG attributes. + zos_find: + patterns: + - USER.* + resource_type: gdg + limit: 30 + scratch: true + purge: true + @@ -192,11 +257,11 @@ Notes ----- .. note:: - Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The \ `zos\_data\_set <./zos_data_set.html>`__\ module can be used to catalog uncataloged data sets. + Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets. - The \ `zos\_find <./zos_find.html>`__\ module currently does not support wildcards for high level qualifiers. For example, \ :literal:`SOME.\*.DATA.SET`\ is a valid pattern, but \ :literal:`\*.DATA.SET`\ is not. + The `zos_find <./zos_find.html>`_ module currently does not support wildcards for high level qualifiers. For example, ``SOME.*.DATA.SET`` is a valid pattern, but ``*.DATA.SET`` is not. - If a data set pattern is specified as \ :literal:`USER.\*`\ , the matching data sets will have two name segments such as \ :literal:`USER.ABC`\ , \ :literal:`USER.XYZ`\ etc. If a wildcard is specified as \ :literal:`USER.\*.ABC`\ , the matching data sets will have three name segments such as \ :literal:`USER.XYZ.ABC`\ , \ :literal:`USER.TEST.ABC`\ etc. + If a data set pattern is specified as ``USER.*``, the matching data sets will have two name segments such as ``USER.ABC``, ``USER.XYZ`` etc. If a wildcard is specified as ``USER.*.ABC``, the matching data sets will have three name segments such as ``USER.XYZ.ABC``, ``USER.TEST.ABC`` etc. The time taken to execute the module is proportional to the number of data sets present on the system and how large the data sets are.
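A hypothetical sketch combining the boolean GDG attributes documented above (the pattern and attribute values are illustrative only):

   - name: Find Generation Data Groups starting with 'USER' that are extended and not empty
     zos_find:
       patterns:
         - USER.*
       resource_type: gdg
       empty: false
       extended: true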
diff --git a/docs/source/modules/zos_gather_facts.rst b/docs/source/modules/zos_gather_facts.rst index 02a56fd23..0247ffd96 100644 --- a/docs/source/modules/zos_gather_facts.rst +++ b/docs/source/modules/zos_gather_facts.rst @@ -17,8 +17,8 @@ zos_gather_facts -- Gather z/OS system facts. Synopsis -------- - Retrieve variables from target z/OS systems. -- Variables are added to the \ :emphasis:`ansible\_facts`\ dictionary, available to playbooks. -- Apply filters on the \ :emphasis:`gather\_subset`\ list to reduce the variables that are added to the \ :emphasis:`ansible\_facts`\ dictionary. +- Variables are added to the *ansible_facts* dictionary, available to playbooks. +- Apply filters on the *gather_subset* list to reduce the variables that are added to the *ansible_facts* dictionary. - Note that the module will fail fast if any unsupported options are provided. This is done to raise awareness of a failure in an automation setting. @@ -32,7 +32,7 @@ Parameters gather_subset If specified, it will collect facts that come under the specified subset (e.g. ipl will return ipl facts). Specifying subsets is recommended to reduce time in gathering facts when the facts needed are in a specific subset. - The following subsets are available \ :literal:`ipl`\ , \ :literal:`cpu`\ , \ :literal:`sys`\ , and \ :literal:`iodf`\ . Depending on the version of ZOAU, additional subsets may be available. + The following subsets are available: ``ipl``, ``cpu``, ``sys``, and ``iodf``. Depending on the version of ZOAU, additional subsets may be available. | **required**: False | **type**: list @@ -41,13 +41,13 @@ gather_subset filter - Filter out facts from the \ :emphasis:`ansible\_facts`\ dictionary. + Filter out facts from the *ansible_facts* dictionary. - Uses shell-style \ `fnmatch `__\ pattern matching to filter out the collected facts. + Uses shell-style `fnmatch `_ pattern matching to filter out the collected facts. - An empty list means 'no filter', same as providing '\*'. + An empty list means 'no filter', same as providing '*'. - Filtering is performed after the facts are gathered such that no compute is saved when filtering. Filtering only reduces the number of variables that are added to the \ :emphasis:`ansible\_facts`\ dictionary. To restrict the facts that are collected, refer to the \ :emphasis:`gather\_subset`\ parameter. + Filtering is performed after the facts are gathered, so no compute is saved by filtering. Filtering only reduces the number of variables that are added to the *ansible_facts* dictionary. To restrict the facts that are collected, refer to the *gather_subset* parameter. | **required**: False | **type**: list diff --git a/docs/source/modules/zos_job_output.rst b/docs/source/modules/zos_job_output.rst index 59e37aeb9..efea6ea2a 100644 --- a/docs/source/modules/zos_job_output.rst +++ b/docs/source/modules/zos_job_output.rst @@ -18,9 +18,9 @@ Synopsis -------- - Display the z/OS job output for a given criteria (Job id/Job name/owner) with/without a data definition name as a filter. - At least provide a job id/job name/owner. -- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC\*" or "\*". -- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP\*" or "\*". -- The owner can be specific such as "IBMUSER", or one that uses a pattern like "\*". +- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC*" or "*".
+- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP*" or "*". +- The owner can be specific such as "IBMUSER", or one that uses a pattern like "*". - If there is no ddname, or if ddname="?", output of all the ddnames under the given job will be displayed. @@ -32,21 +32,21 @@ Parameters job_id - The z/OS job ID of the job containing the spool file. (e.g "STC02560", "STC\*") + The z/OS job ID of the job containing the spool file. (e.g. "STC02560", "STC*") | **required**: False | **type**: str job_name - The name of the batch job. (e.g "TCPIP", "C\*") + The name of the batch job. (e.g. "TCPIP", "C*") | **required**: False | **type**: str owner - The owner who ran the job. (e.g "IBMUSER", "\*") + The owner who ran the job. (e.g. "IBMUSER", "*") | **required**: False | **type**: str @@ -97,7 +97,7 @@ Return Values jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret\_code dictionary with parameter msg\_txt = The job could not be found. + The output information for a list of jobs matching specified criteria. If no job status is found, this will return a ret_code dictionary with the parameter msg_txt = The job could not be found. | **returned**: success | **type**: list @@ -416,7 +416,7 @@ jobs | **sample**: CC 0000 msg_code - Return code extracted from the \`msg\` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". + Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". | **type**: str | **sample**: S0C4 diff --git a/docs/source/modules/zos_job_query.rst b/docs/source/modules/zos_job_query.rst index e4da71341..ea320dfc3 100644 --- a/docs/source/modules/zos_job_query.rst +++ b/docs/source/modules/zos_job_query.rst @@ -17,8 +17,8 @@ zos_job_query -- Query job status Synopsis -------- - List z/OS job(s) and the current status of the job(s). -- Uses job\_name to filter the jobs by the job name. -- Uses job\_id to filter the jobs by the job identifier. +- Uses job_name to filter the jobs by the job name. +- Uses job_id to filter the jobs by the job identifier. - Uses owner to filter the jobs by the job owner. - Uses system to filter the jobs by the system where the job is running (or ran). @@ -35,9 +35,9 @@ job_name A job name can be up to 8 characters long. - The \ :emphasis:`job\_name`\ can contain include multiple wildcards. + The *job_name* can include multiple wildcards. - The asterisk (\`\*\`) wildcard will match zero or more specified characters. + The asterisk (`*`) wildcard will match zero or more specified characters. | **required**: False | **type**: str @@ -56,13 +56,13 @@ owner job_id The job id that has been assigned to the job. - A job id must begin with \`STC\`, \`JOB\`, \`TSU\` and are followed by up to 5 digits. + A job id must begin with `STC`, `JOB` or `TSU` and is followed by up to 5 digits. - When a job id is greater than 99,999, the job id format will begin with \`S\`, \`J\`, \`T\` and are followed by 7 digits. + When a job id is greater than 99,999, the job id format will begin with `S`, `J` or `T` and is followed by 7 digits. - The \ :emphasis:`job\_id`\ can contain include multiple wildcards. + The *job_id* can include multiple wildcards. - The asterisk (\`\*\`) wildcard will match zero or more specified characters. + The asterisk (`*`) wildcard will match zero or more specified characters.
| **required**: False | **type**: str @@ -122,7 +122,7 @@ changed | **type**: bool jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret\_code dictionary with parameter msg\_txt = The job could not be found. + The output information for a list of jobs matching specified criteria. If no job status is found, this will return a ret_code dictionary with the parameter msg_txt = The job could not be found. | **returned**: success | **type**: list @@ -211,7 +211,7 @@ jobs | **sample**: CC 0000 msg_code - Return code extracted from the \`msg\` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". + Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". | **type**: str | **sample**: S0C4 diff --git a/docs/source/modules/zos_job_submit.rst b/docs/source/modules/zos_job_submit.rst index bec95cb54..573b4f4bd 100644 --- a/docs/source/modules/zos_job_submit.rst +++ b/docs/source/modules/zos_job_submit.rst @@ -31,11 +31,11 @@ Parameters src The source file or data set containing the JCL to submit. - It could be a physical sequential data set, a partitioned data set qualified by a member or a path (e.g. \ :literal:`USER.TEST`\ , \ :literal:`USER.JCL(TEST)`\ ), or a generation data set from a generation data group (for example, \ :literal:`USER.TEST.GDG(-2)`\ ). + It could be a physical sequential data set, a partitioned data set qualified by a member or a path (e.g. ``USER.TEST``, ``USER.JCL(TEST)``), or a generation data set from a generation data group (for example, ``USER.TEST.GDG(-2)``). - Or a USS file. (e.g \ :literal:`/u/tester/demo/sample.jcl`\ ) + Or a USS file. (e.g. ``/u/tester/demo/sample.jcl``) - Or a LOCAL file in ansible control node. (e.g \ :literal:`/User/tester/ansible-playbook/sample.jcl`\ ) + Or a LOCAL file on the Ansible control node. (e.g. ``/User/tester/ansible-playbook/sample.jcl``) When using a generation data set, only generations that have already been created are valid. If the relative name is positive, or is negative but not found, the module will fail. @@ -44,13 +44,13 @@ src location - The JCL location. Supported choices are \ :literal:`data\_set`\ , \ :literal:`uss`\ or \ :literal:`local`\ . + The JCL location. Supported choices are ``data_set``, ``uss`` or ``local``. - \ :literal:`data\_set`\ can be a PDS, PDSE, sequential data set, or a generation data set. + ``data_set`` can be a PDS, PDSE, sequential data set, or a generation data set. - \ :literal:`uss`\ means the JCL location is located in UNIX System Services (USS). + ``uss`` means the JCL is located in UNIX System Services (USS). - \ :literal:`local`\ means locally to the Ansible control node. + ``local`` means the JCL is local to the Ansible control node. | **required**: False | **type**: str @@ -59,9 +59,9 @@ location wait_time_s - Option \ :emphasis:`wait\_time\_s`\ is the total time that module \ `zos\_job\_submit <./zos_job_submit.html>`__\ will wait for a submitted job to complete. The time begins when the module is executed on the managed node. + Option *wait_time_s* is the total time that module `zos_job_submit <./zos_job_submit.html>`_ will wait for a submitted job to complete. The time begins when the module is executed on the managed node. - \ :emphasis:`wait\_time\_s`\ is measured in seconds and must be a value greater than 0 and less than 86400. + *wait_time_s* is measured in seconds and must be a value greater than 0 and less than 86400.
| **required**: False | **type**: int @@ -88,9 +88,9 @@ return_output volume The volume serial (VOLSER) is where the data set resides. The option is required only when the data set is not cataloged on the system. - When configured, the \ `zos\_job\_submit <./zos_job_submit.html>`__\ will try to catalog the data set for the volume serial. If it is not able to, the module will fail. + When configured, the `zos_job_submit <./zos_job_submit.html>`_ module will try to catalog the data set for the volume serial. If it is not able to, the module will fail. - Ignored for \ :emphasis:`location=uss`\ and \ :emphasis:`location=local`\ . + Ignored for *location=uss* and *location=local*. | **required**: False | **type**: str @@ -99,7 +99,7 @@ volume encoding Specifies which encoding the local JCL file should be converted from and to, before submitting the job. - This option is only supported for when \ :emphasis:`location=local`\ . + This option is only supported when *location=local*. If this parameter is not provided, and the z/OS system's default encoding cannot be identified, the JCL file will be converted from UTF-8 to IBM-1047 by default; otherwise, the module will detect the z/OS system encoding. @@ -131,13 +131,13 @@ encoding use_template - Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module. + Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. - Only valid when \ :literal:`src`\ is a local file or directory. + Only valid when ``src`` is a local file or directory. - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc. + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order \ `in Ansible's documentation `__\ + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_. | **required**: False | **type**: bool @@ -147,9 +147,9 @@ use_template template_parameters Options to set the way Jinja2 will process templates. - Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ . + Jinja2 already sets defaults for the markers it uses; you can find more information at its `official documentation `_. - These options are ignored unless \ :literal:`use\_template`\ is true. + These options are ignored unless ``use_template`` is true. | **required**: False | **type**: dict @@ -228,7 +228,7 @@ template_parameters trim_blocks Whether Jinja2 should remove the first newline after a block. - Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets.
| **required**: False | **type**: bool @@ -330,9 +330,9 @@ Notes ----- .. note:: - For supported character sets used to encode data, refer to the \ `documentation `__\ . + For supported character sets used to encode data, refer to the `documentation `_. - This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine, which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z, thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if SFTP is not available, the module will fail. @@ -345,7 +345,7 @@ Return Values jobs - List of jobs output. If no job status is found, this will return an empty ret\_code with msg\_txt explanation. + List of job outputs. If no job status is found, this will return an empty ret_code with a msg_txt explanation. | **returned**: success | **type**: list @@ -692,25 +692,25 @@ jobs msg Job status resulting from the job submission. - Job status \`ABEND\` indicates the job ended abnormally. + Job status `ABEND` indicates the job ended abnormally. - Job status \`AC\` indicates the job is active, often a started task or job taking long. + Job status `AC` indicates the job is active, often a started task or job taking long. - Job status \`CAB\` indicates a converter abend. + Job status `CAB` indicates a converter abend. - Job status \`CANCELED\` indicates the job was canceled. + Job status `CANCELED` indicates the job was canceled. - Job status \`CNV\` indicates a converter error. + Job status `CNV` indicates a converter error. - Job status \`FLU\` indicates the job was flushed. + Job status `FLU` indicates the job was flushed. - Job status \`JCLERR\` or \`JCL ERROR\` indicates the JCL has an error. + Job status `JCLERR` or `JCL ERROR` indicates the JCL has an error. - Job status \`SEC\` or \`SEC ERROR\` indicates the job as encountered a security error. + Job status `SEC` or `SEC ERROR` indicates the job has encountered a security error. - Job status \`SYS\` indicates a system failure. + Job status `SYS` indicates a system failure. - Job status \`?\` indicates status can not be determined. + Job status `?` indicates the status cannot be determined. Jobs where the status cannot be determined will result in None (NULL). diff --git a/docs/source/modules/zos_lineinfile.rst b/docs/source/modules/zos_lineinfile.rst index e8d0b0eb2..1db6545c5 100644 --- a/docs/source/modules/zos_lineinfile.rst +++ b/docs/source/modules/zos_lineinfile.rst @@ -33,6 +33,8 @@ src The USS file must be an absolute pathname. + The ``src`` can also be a generation data set (GDS) relative name for a generation that was already created, e.g. ``SOME.CREATION(-1)``. + | **required**: True | **type**: str @@ -40,13 +42,13 @@ src regexp The regular expression to look for in every line of the USS file or data set. - For \ :literal:`state=present`\ , the pattern to replace if found. Only the last line found will be replaced.
+ For ``state=present``, the pattern to replace if found. Only the last line found will be replaced. - For \ :literal:`state=absent`\ , the pattern of the line(s) to remove. + For ``state=absent``, the pattern of the line(s) to remove. - If the regular expression is not matched, the line will be added to the USS file or data set in keeping with \ :literal:`insertbefore`\ or \ :literal:`insertafter`\ settings. + If the regular expression is not matched, the line will be added to the USS file or data set in keeping with ``insertbefore`` or ``insertafter`` settings. - When modifying a line the regexp should typically match both the initial state of the line as well as its state after replacement by \ :literal:`line`\ to ensure idempotence. + When modifying a line, the regexp should typically match both the initial state of the line as well as its state after replacement by ``line`` to ensure idempotence. | **required**: False | **type**: str @@ -64,22 +66,22 @@ state line The line to insert/replace into the USS file or data set. - Required for \ :literal:`state=present`\ . + Required for ``state=present``. - If \ :literal:`backrefs`\ is set, may contain backreferences that will get expanded with the \ :literal:`regexp`\ capture groups if the regexp matches. + If ``backrefs`` is set, may contain backreferences that will get expanded with the ``regexp`` capture groups if the regexp matches. | **required**: False | **type**: str backrefs - Used with \ :literal:`state=present`\ . + Used with ``state=present``. - If set, \ :literal:`line`\ can contain backreferences (both positional and named) that will get populated if the \ :literal:`regexp`\ matches. + If set, ``line`` can contain backreferences (both positional and named) that will get populated if the ``regexp`` matches. - This parameter changes the operation of the module slightly; \ :literal:`insertbefore`\ and \ :literal:`insertafter`\ will be ignored, and if the \ :literal:`regexp`\ does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged. + This parameter changes the operation of the module slightly; ``insertbefore`` and ``insertafter`` will be ignored, and if the ``regexp`` does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged. - If the \ :literal:`regexp`\ does match, the last matching line will be replaced by the expanded line parameter. + If the ``regexp`` does match, the last matching line will be replaced by the expanded line parameter. | **required**: False | **type**: bool @@ -87,23 +89,23 @@ backrefs insertafter - Used with \ :literal:`state=present`\ . + Used with ``state=present``. If specified, the line will be inserted after the last match of the specified regular expression. If the first match is required, use ``firstmatch=yes``. - A special value is available; \ :literal:`EOF`\ for inserting the line at the end of the USS file or data set. + A special value is available; ``EOF`` for inserting the line at the end of the USS file or data set. If the specified regular expression has no matches, EOF will be used instead. - If \ :literal:`insertbefore`\ is set, default value \ :literal:`EOF`\ will be ignored. + If ``insertbefore`` is set, the default value ``EOF`` will be ignored. - If regular expressions are passed to both \ :literal:`regexp`\ and \ :literal:`insertafter`\ , \ :literal:`insertafter`\ is only honored if no match for \ :literal:`regexp`\ is found.
+ If regular expressions are passed to both ``regexp`` and ``insertafter``, ``insertafter`` is only honored if no match for ``regexp`` is found. - May not be used with \ :literal:`backrefs`\ or \ :literal:`insertbefore`\ . + May not be used with ``backrefs`` or ``insertbefore``. - Choices are EOF or '\*regex\*' + Choices are EOF or '*regex*' Default is EOF @@ -112,33 +114,35 @@ insertafter insertbefore - Used with \ :literal:`state=present`\ . + Used with ``state=present``. If specified, the line will be inserted before the last match of the specified regular expression. - If the first match is required, use \ :literal:`firstmatch=yes`\ . + If the first match is required, use ``firstmatch=yes``. - A value is available; \ :literal:`BOF`\ for inserting the line at the beginning of the USS file or data set. + A special value is available; ``BOF`` for inserting the line at the beginning of the USS file or data set. If the specified regular expression has no matches, the line will be inserted at the end of the USS file or data set. - If regular expressions are passed to both \ :literal:`regexp`\ and \ :literal:`insertbefore`\ , \ :literal:`insertbefore`\ is only honored if no match for \ :literal:`regexp`\ is found. + If regular expressions are passed to both ``regexp`` and ``insertbefore``, ``insertbefore`` is only honored if no match for ``regexp`` is found. - May not be used with \ :literal:`backrefs`\ or \ :literal:`insertafter`\ . + May not be used with ``backrefs`` or ``insertafter``. - Choices are BOF or '\*regex\*' + Choices are BOF or '*regex*' | **required**: False | **type**: str backup - Creates a backup file or backup data set for \ :emphasis:`src`\ , including the timestamp information to ensure that you retrieve the original file. + Creates a backup file or backup data set for *src*, including the timestamp information to ensure that you retrieve the original file. - \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . + *backup_name* can be used to specify a backup file name if *backup=true*. The backup file name will be returned on either success or failure of module execution such that data can be retrieved. + A backup can be written to a new generation by using a generation data set (GDS) relative positive name, e.g. ``SOME.CREATION(+1)``. + | **required**: False | **type**: bool | **default**: False @@ -147,11 +151,11 @@ backup backup_name Specify the USS file name or data set name for the destination backup. - If the source \ :emphasis:`src`\ is a USS file or path, the backup\_name must be a file or path name, and the USS file or path must be an absolute path name. + If the source *src* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name. - If the source is an MVS data set, the backup\_name must be an MVS data set name. + If the source is an MVS data set, the backup_name must be an MVS data set name. - If the backup\_name is not provided, the default backup\_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . + If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API.
The MVS backup data set recovery can be done by renaming it. @@ -162,16 +166,16 @@ backup_name tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. | **required**: False | **type**: str firstmatch - Used with \ :literal:`insertafter`\ or \ :literal:`insertbefore`\ . + Used with ``insertafter`` or ``insertbefore``. - If set, \ :literal:`insertafter`\ and \ :literal:`insertbefore`\ will work with the first line that matches the given regular expression. + If set, ``insertafter`` and ``insertbefore`` will work with the first line that matches the given regular expression. | **required**: False | **type**: bool @@ -179,7 +183,7 @@ firstmatch encoding - The character set of the source \ :emphasis:`src`\ . \ `zos\_lineinfile <./zos_lineinfile.html>`__\ requires to be provided with correct encoding to read the content of USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. + The character set of the source *src*. `zos_lineinfile <./zos_lineinfile.html>`_ requires the correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that the USS file or data set is encoded in IBM-1047. Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. @@ -193,7 +197,7 @@ force This is helpful when a data set is being used in a long running process such as a started task and you want to update or read it. - The \ :literal:`force`\ option enables sharing of data sets through the disposition \ :emphasis:`DISP=SHR`\ . + The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*. | **required**: False | **type**: bool @@ -248,6 +252,20 @@ Examples line: 'Should be a working test now' force: true + - name: Add a line to a GDS + zos_lineinfile: + src: SOME.CREATION(-2) + insertafter: EOF + line: 'Should be a working test now' + + - name: Add a line to a data set and back up to a new generation of a GDS + zos_lineinfile: + src: SOME.CREATION.TEST + insertafter: EOF + backup: true + backup_name: CREATION.GDS(+1) + line: 'Should be a working test now' + @@ -259,7 +277,7 @@ Notes All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. - For supported character sets used to encode data, refer to the \ `documentation `__\ . + For supported character sets used to encode data, refer to the `documentation `_. @@ -272,7 +290,7 @@ Return Values changed - Indicates if the source was modified. Value of 1 represents \`true\`, otherwise \`false\`. + Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`. | **returned**: success | **type**: bool diff --git a/docs/source/modules/zos_mount.rst b/docs/source/modules/zos_mount.rst index 5bd283453..3b30be909 100644 --- a/docs/source/modules/zos_mount.rst +++ b/docs/source/modules/zos_mount.rst @@ -16,9 +16,9 @@ zos_mount -- Mount a z/OS file system. Synopsis -------- -- The module \ `zos\_mount <./zos_mount.html>`__\ can manage mount operations for a z/OS UNIX System Services (USS) file system data set.
-- The \ :emphasis:`src`\ data set must be unique and a Fully Qualified Name (FQN). -- The \ :emphasis:`path`\ will be created if needed. +- The module `zos_mount <./zos_mount.html>`_ can manage mount operations for a z/OS UNIX System Services (USS) file system data set. +- The *src* data set must be unique and a Fully Qualified Name (FQN). +- The *path* will be created if needed. @@ -31,7 +31,7 @@ Parameters path The absolute path name onto which the file system is to be mounted. - The \ :emphasis:`path`\ is case sensitive and must be less than or equal 1023 characters long. + The *path* is case sensitive and must be less than or equal to 1023 characters long. | **required**: True | **type**: str @@ -40,9 +40,9 @@ path src The name of the file system to be added to the file system hierarchy. - The file system \ :emphasis:`src`\ must be a data set of type \ :emphasis:`fs\_type`\ . + The file system *src* must be a data set of type *fs_type*. - The file system \ :emphasis:`src`\ data set must be cataloged. + The file system *src* data set must be cataloged. | **required**: True | **type**: str @@ -53,7 +53,7 @@ fs_type The physical file system's data set format to perform the logical mount. - The \ :emphasis:`fs\_type`\ is required to be lowercase. + The *fs_type* is required to be lowercase. | **required**: True | **type**: str @@ -63,25 +63,25 @@ fs_type state The desired status of the described mount (choice). - If \ :emphasis:`state=mounted`\ and \ :emphasis:`src`\ are not in use, the module will add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The \ :emphasis:`path`\ will be updated, the device will be mounted and the module will complete successfully with \ :emphasis:`changed=True`\ . + If *state=mounted* and *src* is not in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will be updated, the device will be mounted and the module will complete successfully with *changed=True*. - If \ :emphasis:`state=mounted`\ and \ :emphasis:`src`\ are in use, the module will add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The \ :emphasis:`path`\ will not be updated, the device will not be mounted and the module will complete successfully with \ :emphasis:`changed=False`\ . + If *state=mounted* and *src* is in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will not be updated, the device will not be mounted and the module will complete successfully with *changed=False*. - If \ :emphasis:`state=unmounted`\ and \ :emphasis:`src`\ are in use, the module will \ :strong:`not`\ add the file system entry to the parmlib member \ :emphasis:`persistent/data\_store`\ . The device will be unmounted and the module will complete successfully with \ :emphasis:`changed=True`\ . + If *state=unmounted* and *src* is in use, the module will **not** add the file system entry to the parmlib member *persistent/data_store*. The device will be unmounted and the module will complete successfully with *changed=True*. - If \ :emphasis:`state=unmounted`\ and \ :emphasis:`src`\ are not in use, the module will \ :strong:`not`\ add the file system entry to parmlib member \ :emphasis:`persistent/data\_store`\ .The device will remain unchanged and the module will complete with \ :emphasis:`changed=False`\ .
+ If *state=unmounted* and *src* is not in use, the module will **not** add the file system entry to parmlib member *persistent/data_store*. The device will remain unchanged and the module will complete with *changed=False*. - If \ :emphasis:`state=present`\ , the module will add the file system entry to the provided parmlib member \ :emphasis:`persistent/data\_store`\ if not present. The module will complete successfully with \ :emphasis:`changed=True`\ . + If *state=present*, the module will add the file system entry to the provided parmlib member *persistent/data_store* if not present. The module will complete successfully with *changed=True*. - If \ :emphasis:`state=absent`\ , the module will remove the file system entry to the provided parmlib member \ :emphasis:`persistent/data\_store`\ if present. The module will complete successfully with \ :emphasis:`changed=True`\ . + If *state=absent*, the module will remove the file system entry from the provided parmlib member *persistent/data_store* if present. The module will complete successfully with *changed=True*. - If \ :emphasis:`state=remounted`\ , the module will \ :strong:`not`\ add the file system entry to parmlib member \ :emphasis:`persistent/data\_store`\ . The device will be unmounted and mounted, the module will complete successfully with \ :emphasis:`changed=True`\ . + If *state=remounted*, the module will **not** add the file system entry to parmlib member *persistent/data_store*. The device will be unmounted and mounted, and the module will complete successfully with *changed=True*. | **required**: False @@ -91,7 +91,7 @@ state persistent - Add or remove mount command entries to provided \ :emphasis:`data\_store`\ + Add or remove mount command entries to the provided *data_store*. | **required**: False | **type**: dict @@ -105,9 +105,9 @@ persistent backup - Creates a backup file or backup data set for \ :emphasis:`data\_store`\ , including the timestamp information to ensure that you retrieve the original parameters defined in \ :emphasis:`data\_store`\ . + Creates a backup file or backup data set for *data_store*, including the timestamp information to ensure that you retrieve the original parameters defined in *data_store*. - \ :emphasis:`backup\_name`\ can be used to specify a backup file name if \ :emphasis:`backup=true`\ . + *backup_name* can be used to specify a backup file name if *backup=true*. The backup file name will be returned on either success or failure of module execution such that data can be retrieved. @@ -119,11 +119,11 @@ persistent backup_name Specify the USS file name or data set name for the destination backup. - If the source \ :emphasis:`data\_store`\ is a USS file or path, the \ :emphasis:`backup\_name`\ name can be relative or absolute for file or path name. + If the source *data_store* is a USS file or path, the *backup_name* can be a relative or absolute file or path name. - If the source is an MVS data set, the backup\_name must be an MVS data set name. + If the source is an MVS data set, the backup_name must be an MVS data set name. - If the backup\_name is not provided, the default \ :emphasis:`backup\_name`\ will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, \ :literal:`/path/file\_name.2020-04-23-08-32-29-bak.tar`\ . + If the backup_name is not provided, the default *backup_name* will be used.
If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``. If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. @@ -132,9 +132,9 @@ persistent comment - If provided, this is used as a comment that surrounds the command in the \ :emphasis:`persistent/data\_store`\ + If provided, this is used as a comment that surrounds the command in the *persistent/data_store*. - Comments are used to encapsulate the \ :emphasis:`persistent/data\_store`\ entry such that they can easily be understood and located. + Comments are used to encapsulate the *persistent/data_store* entry such that it can easily be understood and located. | **required**: False | **type**: list @@ -145,7 +145,7 @@ persistent unmount_opts Describes how the unmount will be performed. - For more on coded character set identifiers, review the IBM documentation topic \ :strong:`UNMOUNT - Remove a file system from the file hierarchy`\ . + For more information, review the IBM documentation topic **UNMOUNT - Remove a file system from the file hierarchy**. | **required**: False | **type**: str @@ -156,13 +156,13 @@ unmount_opts mount_opts Options available to the mount. - If \ :emphasis:`mount\_opts=ro`\ on a mounted/remount, mount is performed read-only. + If *mount_opts=ro* on a mounted/remount, the mount is performed read-only. - If \ :emphasis:`mount\_opts=same`\ and (unmount\_opts=remount), mount is opened in the same mode as previously opened. + If *mount_opts=same* and *unmount_opts=remount*, the mount is opened in the same mode as previously opened. - If \ :emphasis:`mount\_opts=nowait`\ , mount is performed asynchronously. + If *mount_opts=nowait*, the mount is performed asynchronously. - If \ :emphasis:`mount\_opts=nosecurity`\ , security checks are not enforced for files in this file system. + If *mount_opts=nosecurity*, security checks are not enforced for files in this file system. | **required**: False | **type**: str @@ -184,11 +184,11 @@ tag_untagged When the file system is unmounted, the tags are lost. - If \ :emphasis:`tag\_untagged=notext`\ none of the untagged files in the file system are automatically converted during file reading and writing. + If *tag_untagged=notext*, none of the untagged files in the file system are automatically converted during file reading and writing. - If \ :emphasis:`tag\_untagged=text`\ each untagged file is implicitly marked as containing pure text data that can be converted. + If *tag_untagged=text*, each untagged file is implicitly marked as containing pure text data that can be converted. - If this flag is used, use of tag\_ccsid is encouraged. + If this flag is used, use of tag_ccsid is encouraged. | **required**: False | **type**: str @@ -198,13 +198,13 @@ tag_untagged tag_ccsid Identifies the coded character set identifier (ccsid) to be implicitly set for the untagged file. - For more on coded character set identifiers, review the IBM documentation topic \ :strong:`Coded Character Sets`\ . + For more on coded character set identifiers, review the IBM documentation topic **Coded Character Sets**. Specified as a decimal value from 0 to 65535. However, when TEXT is specified, the value must be between 0 and 65535. The value is not checked as being valid and the corresponding code page is not checked as being installed.
- Required when \ :emphasis:`tag\_untagged=TEXT`\ . + Required when *tag_untagged=TEXT*. | **required**: False | **type**: int @@ -214,10 +214,10 @@ allow_uid Specifies whether the SETUID and SETGID mode bits on an executable in this file system are considered. Also determines whether the APF extended attribute or the Program Control extended attribute is honored. - If \ :emphasis:`allow\_uid=True`\ the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. + If *allow_uid=True*, the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. - If \ :emphasis:`allow\_uid=False`\ the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. + If *allow_uid=False*, the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. | **required**: False @@ -226,10 +226,10 @@ allow_uid sysname - For systems participating in shared file system, \ :emphasis:`sysname`\ specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). + For systems participating in a shared file system, *sysname* specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). - \ :emphasis:`sysname`\ is the name of a system participating in shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. + *sysname* is the name of a system participating in a shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. | **required**: False @@ -240,13 +240,13 @@ automove These parameters apply only in a sysplex where systems are exploiting the shared file system capability. They specify what happens to the ownership of a file system when a shutdown, PFS termination, dead system takeover, or file system move occurs. The default setting is AUTOMOVE where the file system will be randomly moved to another system (no system list used). - \ :emphasis:`automove=automove`\ indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. + *automove=automove* indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. - \ :emphasis:`automove=noautomove`\ prevents movement of the file system's ownership in some situations. + *automove=noautomove* prevents movement of the file system's ownership in some situations. - \ :emphasis:`automove=unmount`\ allows the file system to be unmounted in some situations. + *automove=unmount* allows the file system to be unmounted in some situations. | **required**: False @@ -275,7 +275,7 @@ automove_list tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets.
- The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. | **required**: False | **type**: str @@ -388,7 +388,7 @@ Notes If an uncataloged data set needs to be fetched, it should be cataloged first. - Uncataloged data sets can be cataloged using the \ `zos\_data\_set <./zos_data_set.html>`__\ module. + Uncataloged data sets can be cataloged using the `zos_data_set <./zos_data_set.html>`_ module. @@ -466,7 +466,7 @@ persistent | **sample**: SYS1.FILESYS(PRMAABAK) comment - The text that was used in markers around the \ :emphasis:`Persistent/data\_store`\ entry. + The text that was used in markers around the *Persistent/data_store* entry. | **returned**: always | **type**: list @@ -528,7 +528,7 @@ allow_uid true sysname - \ :emphasis:`sysname`\ specifies the particular system on which a mount should be performed. + *sysname* specifies the particular system on which a mount should be performed. | **returned**: if Non-None | **type**: str diff --git a/docs/source/modules/zos_mvs_raw.rst b/docs/source/modules/zos_mvs_raw.rst index f48418264..817951fe3 100644 --- a/docs/source/modules/zos_mvs_raw.rst +++ b/docs/source/modules/zos_mvs_raw.rst @@ -45,9 +45,9 @@ parm auth Determines whether this program should run with authorized privileges. - If \ :emphasis:`auth=true`\ , the program runs as APF authorized. + If *auth=true*, the program runs as APF authorized. - If \ :emphasis:`auth=false`\ , the program runs as unauthorized. + If *auth=false*, the program runs as unauthorized. | **required**: False | **type**: bool @@ -57,7 +57,7 @@ auth verbose Determines if verbose output should be returned from the underlying utility used by this module. - When \ :emphasis:`verbose=true`\ verbose output is returned on module failure. + When *verbose=true*, verbose output is returned on module failure. | **required**: False | **type**: bool @@ -67,19 +67,19 @@ verbose dds The input data source. - \ :emphasis:`dds`\ supports 6 types of sources + *dds* supports 6 types of sources: - 1. \ :emphasis:`dd\_data\_set`\ for data set files. + 1. *dd_data_set* for data set files. - 2. \ :emphasis:`dd\_unix`\ for UNIX files. + 2. *dd_unix* for UNIX files. - 3. \ :emphasis:`dd\_input`\ for in-stream data set. + 3. *dd_input* for an in-stream data set. - 4. \ :emphasis:`dd\_dummy`\ for no content input. + 4. *dd_dummy* for no content input. - 5. \ :emphasis:`dd\_concat`\ for a data set concatenation. + 5. *dd_concat* for a data set concatenation. - 6. \ :emphasis:`dds`\ supports any combination of source types. + 6. *dds* supports any combination of source types. | **required**: False | **type**: list @@ -89,7 +89,7 @@ dds dd_data_set Specify a data set. - \ :emphasis:`dd\_data\_set`\ can reference an existing data set or be used to define a new data set to be created during execution. + *dd_data_set* can reference an existing data set or be used to define a new data set to be created during execution. | **required**: False | **type**: dict @@ -105,12 +105,16 @@ dds data_set_name The data set name. + A data set name can be a GDS relative name. + + When using a GDS relative name for a positive generation, *disposition=new* must be used. + | **required**: False | **type**: str type - The data set type. Only required when \ :emphasis:`disposition=new`\ . + The data set type. Only required when *disposition=new*.
Maps to DSNTYPE on z/OS. @@ -120,7 +124,7 @@ dds disposition - \ :emphasis:`disposition`\ indicates the status of a data set. + *disposition* indicates the status of a data set. Defaults to shr. @@ -130,7 +134,7 @@ dds disposition_normal - \ :emphasis:`disposition\_normal`\ indicates what to do with the data set after a normal termination of the program. + *disposition_normal* indicates what to do with the data set after a normal termination of the program. | **required**: False | **type**: str @@ -138,7 +142,7 @@ dds disposition_abnormal - \ :emphasis:`disposition\_abnormal`\ indicates what to do with the data set after an abnormal termination of the program. + *disposition_abnormal* indicates what to do with the data set after an abnormal termination of the program. | **required**: False | **type**: str @@ -146,15 +150,15 @@ dds reuse - Determines if a data set should be reused if \ :emphasis:`disposition=new`\ and if a data set with a matching name already exists. + Determines if a data set should be reused if *disposition=new* and if a data set with a matching name already exists. - If \ :emphasis:`reuse=true`\ , \ :emphasis:`disposition`\ will be automatically switched to \ :literal:`SHR`\ . + If *reuse=true*, *disposition* will be automatically switched to ``SHR``. - If \ :emphasis:`reuse=false`\ , and a data set with a matching name already exists, allocation will fail. + If *reuse=false* and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with \ :emphasis:`replace`\ . + Mutually exclusive with *replace*. - \ :emphasis:`reuse`\ is only considered when \ :emphasis:`disposition=new`\ + *reuse* is only considered when *disposition=new*. | **required**: False | **type**: bool @@ -162,17 +166,17 @@ dds replace - Determines if a data set should be replaced if \ :emphasis:`disposition=new`\ and a data set with a matching name already exists. + Determines if a data set should be replaced if *disposition=new* and a data set with a matching name already exists. - If \ :emphasis:`replace=true`\ , the original data set will be deleted, and a new data set created. + If *replace=true*, the original data set will be deleted, and a new data set created. - If \ :emphasis:`replace=false`\ , and a data set with a matching name already exists, allocation will fail. + If *replace=false* and a data set with a matching name already exists, allocation will fail. - Mutually exclusive with \ :emphasis:`reuse`\ . + Mutually exclusive with *reuse*. - \ :emphasis:`replace`\ is only considered when \ :emphasis:`disposition=new`\ + *replace* is only considered when *disposition=new*. - \ :emphasis:`replace`\ will result in loss of all data in the original data set unless \ :emphasis:`backup`\ is specified. + *replace* will result in loss of all data in the original data set unless *backup* is specified. | **required**: False | **type**: bool @@ -180,9 +184,9 @@ dds backup - Determines if a backup should be made of an existing data set when \ :emphasis:`disposition=new`\ , \ :emphasis:`replace=true`\ , and a data set with the desired name is found. + Determines if a backup should be made of an existing data set when *disposition=new*, *replace=true*, and a data set with the desired name is found. - \ :emphasis:`backup`\ is only used when \ :emphasis:`replace=true`\ .
| **required**: False | **type**: bool @@ -190,7 +194,7 @@ dds space_type - The unit of measurement to use when allocating space for a new data set using \ :emphasis:`space\_primary`\ and \ :emphasis:`space\_secondary`\ . + The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. | **required**: False | **type**: str @@ -200,9 +204,9 @@ dds space_primary The primary amount of space to allocate for a new data set. - The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. + The value provided to *space_type* is used as the unit of space for the allocation. - Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . + Not applicable when *space_type=blklgth* or *space_type=reclgth*. | **required**: False | **type**: int @@ -211,9 +215,9 @@ dds space_secondary When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. - The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation. + The value provided to *space_type* is used as the unit of space for the allocation. - Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ . + Not applicable when *space_type=blklgth* or *space_type=reclgth*. | **required**: False | **type**: int @@ -231,7 +235,7 @@ dds sms_management_class The desired management class for a new SMS-managed data set. - \ :emphasis:`sms\_management\_class`\ is ignored if specified for an existing data set. + *sms_management_class* is ignored if specified for an existing data set. All values must be 1 to 8 alphanumeric characters. @@ -242,7 +246,7 @@ dds sms_storage_class The desired storage class for a new SMS-managed data set. - \ :emphasis:`sms\_storage\_class`\ is ignored if specified for an existing data set. + *sms_storage_class* is ignored if specified for an existing data set. All values must be 1 to 8 alphanumeric characters. @@ -253,7 +257,7 @@ dds sms_data_class The desired data class for a new SMS-managed data set. - \ :emphasis:`sms\_data\_class`\ is ignored if specified for an existing data set. + *sms_data_class* is ignored if specified for an existing data set. All values must be 1 to 8 alphanumeric characters. @@ -264,7 +268,7 @@ dds block_size The maximum length of a block in bytes. - Default is dependent on \ :emphasis:`record\_format`\ + Default is dependent on *record_format*. | **required**: False | **type**: int @@ -280,9 +284,9 @@ dds key_label The label for the encryption key used by the system to encrypt the data set. - \ :emphasis:`key\_label`\ is the public name of a protected encryption key in the ICSF key repository. + *key_label* is the public name of a protected encryption key in the ICSF key repository. - \ :emphasis:`key\_label`\ should only be provided when creating an extended format data set. + *key_label* should only be provided when creating an extended format data set. Maps to DSKEYLBL on z/OS. @@ -304,7 +308,7 @@ dds Key label must have a private key associated with it. - \ :emphasis:`label`\ can be a maximum of 64 characters. + *label* can be a maximum of 64 characters. Maps to KEYLAB1 on z/OS. @@ -313,9 +317,9 @@ dds encoding - How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager.
- \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. + *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. Maps to KEYCD1 on z/OS. @@ -339,7 +343,7 @@ dds Key label must have a private key associated with it. - \ :emphasis:`label`\ can be a maximum of 64 characters. + *label* can be a maximum of 64 characters. Maps to KEYLAB2 on z/OS. @@ -348,9 +352,9 @@ dds encoding - How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager. + How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. - \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding. + *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. Maps to KEYCD2 on z/OS. @@ -363,7 +367,7 @@ dds key_length The length of the keys used in a new data set. - If using SMS, setting \ :emphasis:`key\_length`\ overrides the key length defined in the SMS data class of the data set. + If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. Valid values are (0-255 non-vsam), (1-255 vsam). @@ -376,14 +380,14 @@ dds The first byte of a logical record is position 0. - Provide \ :emphasis:`key\_offset`\ only for VSAM key-sequenced data sets. + Provide *key_offset* only for VSAM key-sequenced data sets. | **required**: False | **type**: int record_length - The logical record length. (e.g \ :literal:`80`\ ). + The logical record length. (e.g. ``80``). For variable data sets, the length must include the 4-byte prefix area. @@ -417,11 +421,11 @@ dds type The type of the content to be returned. - \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ . + ``text`` means return content in the encoding specified by *response_encoding*. - \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ . + *src_encoding* and *response_encoding* are only used when *type=text*. - \ :literal:`base64`\ means return content in binary mode. + ``base64`` means return content in binary mode. | **required**: True | **type**: str @@ -463,7 +467,7 @@ dds path The path to an existing UNIX file. - Or provide the path to an new created UNIX file when \ :emphasis:`status\_group=OCREAT`\ . + Or provide the path to a newly created UNIX file when *status_group=OCREAT*. The provided path must be absolute. @@ -488,7 +492,7 @@ dds mode - The file access attributes when the UNIX file is created specified in \ :emphasis:`path`\ . + The file access attributes to use when creating the UNIX file specified in *path*. Specify the mode as an octal number, similar to chmod. @@ -499,47 +503,47 @@ dds status_group - The status for the UNIX file specified in \ :emphasis:`path`\ . + The status for the UNIX file specified in *path*. - If you do not specify a value for the \ :emphasis:`status\_group`\ parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. + If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails if the pathname does not exist. Maps to PATHOPTS status group file options on z/OS. You can specify up to 6 choices.
- \ :emphasis:`oappend`\ sets the file offset to the end of the file before each write, so that data is written at the end of the file. + *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. - \ :emphasis:`ocreat`\ specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and \ :emphasis:`oexcl`\ was not specified, the system allows the program to use the existing file. If the file already exists and \ :emphasis:`oexcl`\ was specified, the system fails the allocation and the job step. + *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. - \ :emphasis:`oexcl`\ specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores \ :emphasis:`oexcl`\ if \ :emphasis:`ocreat`\ is not also specified. + *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. - \ :emphasis:`onoctty`\ specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. + *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. - \ :emphasis:`ononblock`\ specifies the following, depending on the type of file + *ononblock* specifies the following, depending on the type of file For a FIFO special file - 1. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`ordonly`\ access, an open function for reading-only returns without delay. + 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. - 2. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`ordonly`\ access, an open function for reading-only blocks (waits) until a process opens the file for writing. + 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. - 3. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`owronly`\ access, an open function for writing-only returns an error if no process currently has the file open for reading. + 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. - 4. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`owronly`\ access, an open function for writing-only blocks (waits) until a process opens the file for reading. + 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. 5. For a character special file that supports nonblocking open - 6. 
If \ :emphasis:`ononblock`\ is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - 7. If \ :emphasis:`ononblock`\ is not specified, an open function blocks (waits) until the device is ready or available. + 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. - \ :emphasis:`ononblock`\ has no effect on other file types. + *ononblock* has no effect on other file types. - \ :emphasis:`osync`\ specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - \ :emphasis:`otrunc`\ specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with \ :emphasis:`ordwr`\ or \ :emphasis:`owronly`\ . + *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. - When \ :emphasis:`otrunc`\ is specified, the system does not change the mode and owner. \ :emphasis:`otrunc`\ has no effect on FIFO special files or character special files. + When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. | **required**: False | **type**: list @@ -548,7 +552,7 @@ dds access_group - The kind of access to request for the UNIX file specified in \ :emphasis:`path`\ . + The kind of access to request for the UNIX file specified in *path*. | **required**: False | **type**: str @@ -556,7 +560,7 @@ dds file_data_type - The type of data that is (or will be) stored in the file specified in \ :emphasis:`path`\ . + The type of data that is (or will be) stored in the file specified in *path*. Maps to FILEDATA on z/OS. @@ -569,7 +573,7 @@ dds block_size The block size, in bytes, for the UNIX file. - Default is dependent on \ :emphasis:`record\_format`\ + Default is dependent on *record_format* | **required**: False | **type**: int @@ -578,7 +582,7 @@ dds record_length The logical record length for the UNIX file. - \ :emphasis:`record\_length`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes. + *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. Maps to LRECL on z/OS. @@ -589,7 +593,7 @@ dds record_format The record format for the UNIX file. 
- \ :emphasis:`record\_format`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes.
+ *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes.

| **required**: False
| **type**: str

@@ -608,11 +612,11 @@ dds

type
The type of the content to be returned.

- \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ .
+ ``text`` means return content in encoding specified by *response_encoding*.

- \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ .
+ *src_encoding* and *response_encoding* are only used when *type=text*.

- \ :literal:`base64`\ means return content in binary mode.
+ ``base64`` means return content in binary mode.

| **required**: True
| **type**: str

@@ -638,7 +642,7 @@ dds

dd_input
- \ :emphasis:`dd\_input`\ is used to specify an in-stream data set.
+ *dd_input* is used to specify an in-stream data set.

Input will be saved to a temporary data set with a record length of 80.

@@ -656,15 +660,15 @@ dds

content
The input contents for the DD.

- \ :emphasis:`dd\_input`\ supports single or multiple lines of input.
+ *dd_input* supports single or multiple lines of input.

Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item.

If a list of strings is provided, newlines will be added to each of the lines when used as input.

- If a multi-line string is provided, use the proper block scalar style. YAML supports both \ `literal `__\ and \ `folded `__\ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; \ :emphasis:`content: | 2`\ is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block \ `chomping `__\ indicators "+" and "-" as well.
+ If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example, *content: | 2* is a literal block style indicator with a 2 space indentation; the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well.

- When using the \ :emphasis:`content`\ option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all \ :emphasis:`content`\ types; string, list of strings and when using a YAML block indicator.
+ When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present, while retaining a maximum length of 80 columns for any line. This is true for all *content* types: string, list of strings, and when using a YAML block indicator.
| **required**: True
| **type**: raw

@@ -682,11 +686,11 @@ dds

type
The type of the content to be returned.

- \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ .
+ ``text`` means return content in encoding specified by *response_encoding*.

- \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ .
+ *src_encoding* and *response_encoding* are only used when *type=text*.

- \ :literal:`base64`\ means return content in binary mode.
+ ``base64`` means return content in binary mode.

| **required**: True
| **type**: str

@@ -696,7 +700,7 @@ dds

src_encoding
The encoding of the data set on the z/OS system.

- for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed.
+ For *dd_input*, *src_encoding* should generally not need to be changed.

| **required**: False
| **type**: str

@@ -714,7 +718,7 @@ dds

dd_output
- Use \ :emphasis:`dd\_output`\ to specify - Content sent to the DD should be returned to the user.
+ Use *dd_output* to specify that content sent to the DD should be returned to the user.

| **required**: False
| **type**: dict

@@ -739,11 +743,11 @@ dds

type
The type of the content to be returned.

- \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ .
+ ``text`` means return content in encoding specified by *response_encoding*.

- \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ .
+ *src_encoding* and *response_encoding* are only used when *type=text*.

- \ :literal:`base64`\ means return content in binary mode.
+ ``base64`` means return content in binary mode.

| **required**: True
| **type**: str

@@ -753,7 +757,7 @@ dds

src_encoding
The encoding of the data set on the z/OS system.

- for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed.
+ For *dd_output*, *src_encoding* should generally not need to be changed.

| **required**: False
| **type**: str

@@ -771,9 +775,9 @@ dds

dd_dummy
- Use \ :emphasis:`dd\_dummy`\ to specify - No device or external storage space is to be allocated to the data set. - No disposition processing is to be performed on the data set.
+ Use *dd_dummy* to specify that no device or external storage space is to be allocated to the data set and that no disposition processing is to be performed on the data set.

- \ :emphasis:`dd\_dummy`\ accepts no content input.
+ *dd_dummy* accepts no content input.

| **required**: False
| **type**: dict

@@ -788,7 +792,7 @@ dds

dd_vio
- \ :emphasis:`dd\_vio`\ is used to handle temporary data sets.
+ *dd_vio* is used to handle temporary data sets.

VIO data sets reside in the paging space; but, to the problem program and the access method, the data sets appear to reside on a direct access storage device.

@@ -807,7 +811,7 @@ dds

dd_concat
- \ :emphasis:`dd\_concat`\ is used to specify a data set concatenation.
+ *dd_concat* is used to specify a data set concatenation.

| **required**: False
| **type**: dict

@@ -821,7 +825,7 @@ dds

dds
- A list of DD statements, which can contain any of the following types: \ :emphasis:`dd\_data\_set`\ , \ :emphasis:`dd\_unix`\ , and \ :emphasis:`dd\_input`\ .
+ A list of DD statements, which can contain any of the following types: *dd_data_set*, *dd_unix*, and *dd_input*.

| **required**: False
| **type**: list

@@ -831,7 +835,7 @@ dds

dd_data_set
Specify a data set.
- \ :emphasis:`dd\_data\_set`\ can reference an existing data set. The data set referenced with \ :literal:`data\_set\_name`\ must be allocated before the module \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ is run, you can use \ `zos\_data\_set <./zos_data_set.html>`__\ to allocate a data set.
+ *dd_data_set* can reference an existing data set. The data set referenced with ``data_set_name`` must be allocated before the module `zos_mvs_raw <./zos_mvs_raw.html>`_ is run; you can use `zos_data_set <./zos_data_set.html>`_ to allocate a data set.

| **required**: False
| **type**: dict

@@ -840,12 +844,16 @@ dds

data_set_name
The data set name.

+ A data set name can be a GDS relative name.
+
+ When using a GDS relative name that is a positive generation, *disposition=new* must be used.
+
| **required**: False
| **type**: str

type
- The data set type. Only required when \ :emphasis:`disposition=new`\ .
+ The data set type. Only required when *disposition=new*.

Maps to DSNTYPE on z/OS.

@@ -855,7 +863,7 @@ dds

disposition
- \ :emphasis:`disposition`\ indicates the status of a data set.
+ *disposition* indicates the status of a data set.

Defaults to shr.

@@ -865,7 +873,7 @@ dds

disposition_normal
- \ :emphasis:`disposition\_normal`\ indicates what to do with the data set after normal termination of the program.
+ *disposition_normal* indicates what to do with the data set after normal termination of the program.

| **required**: False
| **type**: str

@@ -873,7 +881,7 @@ dds

disposition_abnormal
- \ :emphasis:`disposition\_abnormal`\ indicates what to do with the data set after abnormal termination of the program.
+ *disposition_abnormal* indicates what to do with the data set after abnormal termination of the program.

| **required**: False
| **type**: str

@@ -881,15 +889,15 @@ dds

reuse
- Determines if data set should be reused if \ :emphasis:`disposition=new`\ and a data set with matching name already exists.
+ Determines if the data set should be reused if *disposition=new* and a data set with a matching name already exists.

- If \ :emphasis:`reuse=true`\ , \ :emphasis:`disposition`\ will be automatically switched to \ :literal:`SHR`\ .
+ If *reuse=true*, *disposition* will be automatically switched to ``SHR``.

- If \ :emphasis:`reuse=false`\ , and a data set with a matching name already exists, allocation will fail.
+ If *reuse=false* and a data set with a matching name already exists, allocation will fail.

- Mutually exclusive with \ :emphasis:`replace`\ .
+ Mutually exclusive with *replace*.

- \ :emphasis:`reuse`\ is only considered when \ :emphasis:`disposition=new`\ 
+ *reuse* is only considered when *disposition=new*.

| **required**: False
| **type**: bool

@@ -897,17 +905,17 @@ dds

replace
- Determines if data set should be replaced if \ :emphasis:`disposition=new`\ and a data set with matching name already exists.
+ Determines if the data set should be replaced if *disposition=new* and a data set with a matching name already exists.

- If \ :emphasis:`replace=true`\ , the original data set will be deleted, and a new data set created.
+ If *replace=true*, the original data set will be deleted, and a new data set created.

- If \ :emphasis:`replace=false`\ , and a data set with a matching name already exists, allocation will fail.
+ If *replace=false* and a data set with a matching name already exists, allocation will fail.

- Mutually exclusive with \ :emphasis:`reuse`\ .
+ Mutually exclusive with *reuse*.
- \ :emphasis:`replace`\ is only considered when \ :emphasis:`disposition=new`\ 
+ *replace* is only considered when *disposition=new*.

- \ :emphasis:`replace`\ will result in loss of all data in the original data set unless \ :emphasis:`backup`\ is specified.
+ *replace* will result in loss of all data in the original data set unless *backup* is specified.

| **required**: False
| **type**: bool

@@ -915,9 +923,9 @@ dds

backup
- Determines if a backup should be made of existing data set when \ :emphasis:`disposition=new`\ , \ :emphasis:`replace=true`\ , and a data set with the desired name is found.
+ Determines if a backup should be made of an existing data set when *disposition=new*, *replace=true*, and a data set with the desired name is found.

- \ :emphasis:`backup`\ is only used when \ :emphasis:`replace=true`\ .
+ *backup* is only used when *replace=true*.

| **required**: False
| **type**: bool

@@ -925,7 +933,7 @@ dds

space_type
- The unit of measurement to use when allocating space for a new data set using \ :emphasis:`space\_primary`\ and \ :emphasis:`space\_secondary`\ .
+ The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*.

| **required**: False
| **type**: str

@@ -935,9 +943,9 @@ dds

space_primary
The primary amount of space to allocate for a new data set.

- The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation.
+ The value provided to *space_type* is used as the unit of space for the allocation.

- Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ .
+ Not applicable when *space_type=blklgth* or *space_type=reclgth*.

| **required**: False
| **type**: int

@@ -946,9 +954,9 @@ dds

space_secondary
When primary allocation of space is filled, secondary space will be allocated with the provided size as needed.

- The value provided to \ :emphasis:`space\_type`\ is used as the unit of space for the allocation.
+ The value provided to *space_type* is used as the unit of space for the allocation.

- Not applicable when \ :emphasis:`space\_type=blklgth`\ or \ :emphasis:`space\_type=reclgth`\ .
+ Not applicable when *space_type=blklgth* or *space_type=reclgth*.

| **required**: False
| **type**: int

@@ -966,7 +974,7 @@ dds

sms_management_class
The desired management class for a new SMS-managed data set.

- \ :emphasis:`sms\_management\_class`\ is ignored if specified for an existing data set.
+ *sms_management_class* is ignored if specified for an existing data set.

All values must be between 1-8 alpha-numeric characters.

@@ -977,7 +985,7 @@ dds

sms_storage_class
The desired storage class for a new SMS-managed data set.

- \ :emphasis:`sms\_storage\_class`\ is ignored if specified for an existing data set.
+ *sms_storage_class* is ignored if specified for an existing data set.

All values must be between 1-8 alpha-numeric characters.

@@ -988,7 +996,7 @@ dds

sms_data_class
The desired data class for a new SMS-managed data set.

- \ :emphasis:`sms\_data\_class`\ is ignored if specified for an existing data set.
+ *sms_data_class* is ignored if specified for an existing data set.

All values must be between 1-8 alpha-numeric characters.

@@ -999,7 +1007,7 @@ dds

block_size
The maximum length of a block in bytes.

- Default is dependent on \ :emphasis:`record\_format`\ 
+ Default is dependent on *record_format*.

| **required**: False
| **type**: int

@@ -1015,9 +1023,9 @@ dds

key_label
The label for the encryption key used by the system to encrypt the data set.
- \ :emphasis:`key\_label`\ is the public name of a protected encryption key in the ICSF key repository.
+ *key_label* is the public name of a protected encryption key in the ICSF key repository.

- \ :emphasis:`key\_label`\ should only be provided when creating an extended format data set.
+ *key_label* should only be provided when creating an extended format data set.

Maps to DSKEYLBL on z/OS.

@@ -1039,7 +1047,7 @@ dds

Key label must have a private key associated with it.

- \ :emphasis:`label`\ can be a maximum of 64 characters.
+ *label* can be a maximum of 64 characters.

Maps to KEYLAB1 on z/OS.

@@ -1048,9 +1056,9 @@ dds

encoding
- How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager.
+ How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager.

- \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding.
+ *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding.

Maps to KEYCD1 on z/OS.

@@ -1074,7 +1082,7 @@ dds

Key label must have a private key associated with it.

- \ :emphasis:`label`\ can be a maximum of 64 characters.
+ *label* can be a maximum of 64 characters.

Maps to KEYLAB2 on z/OS.

@@ -1083,9 +1091,9 @@ dds

encoding
- How the label for the key encrypting key specified by \ :emphasis:`label`\ is encoded by the Encryption Key Manager.
+ How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager.

- \ :emphasis:`encoding`\ can either be set to \ :literal:`l`\ for label encoding, or \ :literal:`h`\ for hash encoding.
+ *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding.

Maps to KEYCD2 on z/OS.

@@ -1098,7 +1106,7 @@ dds

key_length
The length of the keys used in a new data set.

- If using SMS, setting \ :emphasis:`key\_length`\ overrides the key length defined in the SMS data class of the data set.
+ If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set.

Valid values are (0-255 non-vsam), (1-255 vsam).

@@ -1111,14 +1119,14 @@ dds

The first byte of a logical record is position 0.

- Provide \ :emphasis:`key\_offset`\ only for VSAM key-sequenced data sets.
+ Provide *key_offset* only for VSAM key-sequenced data sets.

| **required**: False
| **type**: int


record_length
- The logical record length. (e.g \ :literal:`80`\ ).
+ The logical record length. (e.g. ``80``).

For variable data sets, the length must include the 4-byte prefix area.

@@ -1152,11 +1160,11 @@ dds

type
The type of the content to be returned.

- \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ .
+ ``text`` means return content in encoding specified by *response_encoding*.

- \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ .
+ *src_encoding* and *response_encoding* are only used when *type=text*.

- \ :literal:`base64`\ means return content in binary mode.
+ ``base64`` means return content in binary mode.

| **required**: True
| **type**: str

@@ -1191,7 +1199,7 @@ dds

path
The path to an existing UNIX file.

- Or provide the path to an new created UNIX file when \ :emphasis:`status\_group=ocreat`\ .
+ Or provide the path to a newly created UNIX file when *status_group=ocreat*.

The provided path must be absolute.
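As a minimal sketch of how the *dd_unix* options above fit together (the program, path, and instream contents here are illustrative placeholders drawn from the surrounding documentation, not output of this changeset)::

    - name: Run IDCAMS and write sysprint to a new UNIX file
      zos_mvs_raw:
        program_name: idcams
        auth: true
        dds:
          - dd_unix:
              dd_name: sysprint
              # ocreat creates /tmp/hypothetical/sysprint.txt if it does not exist;
              # owronly opens it for writing only.
              path: /tmp/hypothetical/sysprint.txt
              status_group:
                - ocreat
                - owronly
              file_data_type: text
              return_content:
                type: text
          - dd_input:
              dd_name: sysin
              content: " LISTCAT ENTRIES('SOME.DATASET.*')"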
@@ -1216,7 +1224,7 @@ dds

mode
- The file access attributes when the UNIX file is created specified in \ :emphasis:`path`\ .
+ The file access attributes to use when creating the UNIX file specified in *path*.

Specify the mode as an octal number similar to chmod.

@@ -1227,47 +1235,47 @@ dds

status_group
- The status for the UNIX file specified in \ :emphasis:`path`\ .
+ The status for the UNIX file specified in *path*.

- If you do not specify a value for the \ :emphasis:`status\_group`\ parameter the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist.
+ If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist.

Maps to PATHOPTS status group file options on z/OS.

You can specify up to 6 choices.

- \ :emphasis:`oappend`\ sets the file offset to the end of the file before each write, so that data is written at the end of the file.
+ *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file.

- \ :emphasis:`ocreat`\ specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and \ :emphasis:`oexcl`\ was not specified, the system allows the program to use the existing file. If the file already exists and \ :emphasis:`oexcl`\ was specified, the system fails the allocation and the job step.
+ *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step.

- \ :emphasis:`oexcl`\ specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores \ :emphasis:`oexcl`\ if \ :emphasis:`ocreat`\ is not also specified.
+ *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified.

- \ :emphasis:`onoctty`\ specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process.
+ *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process.

- \ :emphasis:`ononblock`\ specifies the following, depending on the type of file
+ *ononblock* specifies the following, depending on the type of file

For a FIFO special file

- 1. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`ordonly`\ access, an open function for reading-only returns without delay.
+ 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay.

- 2. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`ordonly`\ access, an open function for reading-only blocks (waits) until a process opens the file for writing.
+ 2.
With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. - 3. With \ :emphasis:`ononblock`\ specified and \ :emphasis:`owronly`\ access, an open function for writing-only returns an error if no process currently has the file open for reading. + 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. - 4. With \ :emphasis:`ononblock`\ not specified and \ :emphasis:`owronly`\ access, an open function for writing-only blocks (waits) until a process opens the file for reading. + 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. 5. For a character special file that supports nonblocking open - 6. If \ :emphasis:`ononblock`\ is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - 7. If \ :emphasis:`ononblock`\ is not specified, an open function blocks (waits) until the device is ready or available. + 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. - \ :emphasis:`ononblock`\ has no effect on other file types. + *ononblock* has no effect on other file types. - \ :emphasis:`osync`\ specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - \ :emphasis:`otrunc`\ specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with \ :emphasis:`ordwr`\ or \ :emphasis:`owronly`\ . + *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. - When \ :emphasis:`otrunc`\ is specified, the system does not change the mode and owner. \ :emphasis:`otrunc`\ has no effect on FIFO special files or character special files. + When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. | **required**: False | **type**: list @@ -1276,7 +1284,7 @@ dds access_group - The kind of access to request for the UNIX file specified in \ :emphasis:`path`\ . + The kind of access to request for the UNIX file specified in *path*. | **required**: False | **type**: str @@ -1284,7 +1292,7 @@ dds file_data_type - The type of data that is (or will be) stored in the file specified in \ :emphasis:`path`\ . + The type of data that is (or will be) stored in the file specified in *path*. Maps to FILEDATA on z/OS. @@ -1297,7 +1305,7 @@ dds block_size The block size, in bytes, for the UNIX file. - Default is dependent on \ :emphasis:`record\_format`\ + Default is dependent on *record_format* | **required**: False | **type**: int @@ -1306,7 +1314,7 @@ dds record_length The logical record length for the UNIX file. 
- \ :emphasis:`record\_length`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes.
+ *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes.

Maps to LRECL on z/OS.

@@ -1317,7 +1325,7 @@ dds

record_format
The record format for the UNIX file.

- \ :emphasis:`record\_format`\ is required in situations where the data will be processed as records and therefore, \ :emphasis:`record\_length`\ , \ :emphasis:`block\_size`\ and \ :emphasis:`record\_format`\ need to be supplied since a UNIX file would normally be treated as a stream of bytes.
+ *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes.

| **required**: False
| **type**: str

@@ -1336,11 +1344,11 @@ dds

type
The type of the content to be returned.

- \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ .
+ ``text`` means return content in encoding specified by *response_encoding*.

- \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ .
+ *src_encoding* and *response_encoding* are only used when *type=text*.

- \ :literal:`base64`\ means return content in binary mode.
+ ``base64`` means return content in binary mode.

| **required**: True
| **type**: str

@@ -1366,7 +1374,7 @@ dds

dd_input
- \ :emphasis:`dd\_input`\ is used to specify an in-stream data set.
+ *dd_input* is used to specify an in-stream data set.

Input will be saved to a temporary data set with a record length of 80.

@@ -1377,15 +1385,15 @@ dds

content
The input contents for the DD.

- \ :emphasis:`dd\_input`\ supports single or multiple lines of input.
+ *dd_input* supports single or multiple lines of input.

Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item.

If a list of strings is provided, newlines will be added to each of the lines when used as input.

- If a multi-line string is provided, use the proper block scalar style. YAML supports both \ `literal `__\ and \ `folded `__\ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; \ :emphasis:`content: | 2`\ is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block \ `chomping `__\ indicators "+" and "-" as well.
+ If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example, *content: | 2* is a literal block style indicator with a 2 space indentation; the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well.
- When using the \ :emphasis:`content`\ option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all \ :emphasis:`content`\ types; string, list of strings and when using a YAML block indicator.
+ When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present, while retaining a maximum length of 80 columns for any line. This is true for all *content* types: string, list of strings, and when using a YAML block indicator.

| **required**: True
| **type**: raw

@@ -1403,11 +1411,11 @@ dds

type
The type of the content to be returned.

- \ :literal:`text`\ means return content in encoding specified by \ :emphasis:`response\_encoding`\ .
+ ``text`` means return content in encoding specified by *response_encoding*.

- \ :emphasis:`src\_encoding`\ and \ :emphasis:`response\_encoding`\ are only used when \ :emphasis:`type=text`\ .
+ *src_encoding* and *response_encoding* are only used when *type=text*.

- \ :literal:`base64`\ means return content in binary mode.
+ ``base64`` means return content in binary mode.

| **required**: True
| **type**: str

@@ -1417,7 +1425,7 @@ dds

src_encoding
The encoding of the data set on the z/OS system.

- for \ :emphasis:`dd\_input`\ , \ :emphasis:`src\_encoding`\ should generally not need to be changed.
+ For *dd_input*, *src_encoding* should generally not need to be changed.

| **required**: False
| **type**: str

@@ -1440,7 +1448,7 @@ dds

tmp_hlq
Override the default high level qualifier (HLQ) for temporary and backup datasets.

- The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used.
+ The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used.

| **required**: False
| **type**: str

@@ -1748,6 +1756,37 @@ Examples

VOLUMES(222222) -
UNIQUE)

+
+ - name: List data sets matching pattern in catalog,
+ save output to a new generation of a GDG.
+ zos_mvs_raw:
+ program_name: idcams
+ auth: true
+ dds:
+ - dd_data_set:
+ dd_name: sysprint
+ data_set_name: TEST.CREATION(+1)
+ disposition: new
+ return_content:
+ type: text
+ - dd_input:
+ dd_name: sysin
+ content: " LISTCAT ENTRIES('SOME.DATASET.*')"
+
+ - name: List data sets matching pattern in catalog,
+ save output to a GDS already created.
+ zos_mvs_raw:
+ program_name: idcams
+ auth: true
+ dds:
+ - dd_data_set:
+ dd_name: sysprint
+ data_set_name: TEST.CREATION(-2)
+ return_content:
+ type: text
+ - dd_input:
+ dd_name: sysin
+ content: " LISTCAT ENTRIES('SOME.DATASET.*')"
+

@@ -1755,11 +1794,11 @@ Notes

-----

.. note::
- When executing programs using \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ , you may encounter errors that originate in the programs implementation. Two such known issues are noted below of which one has been addressed with an APAR.
+ When executing programs using `zos_mvs_raw <./zos_mvs_raw.html>`_, you may encounter errors that originate in the program's implementation. Two such known issues are noted below, of which one has been addressed with an APAR.

- 1. \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication.
+ 1.
`zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication.

- 2. \ `zos\_mvs\_raw <./zos_mvs_raw.html>`__\ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089.
+ 2. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089.

3. When executing a program, refer to the program's documentation, as each program's requirements can vary from DDs, instream-data indentation and continuation characters.

@@ -1837,7 +1876,7 @@ backups

| **type**: str

backup_name
- The name of the data set containing the backup of content from data set in original\_name.
+ The name of the data set containing the backup of content from the data set in original_name.

| **type**: str

diff --git a/docs/source/modules/zos_operator.rst b/docs/source/modules/zos_operator.rst
index 8f7e76df1..5bc803962 100644
--- a/docs/source/modules/zos_operator.rst
+++ b/docs/source/modules/zos_operator.rst
@@ -33,10 +33,12 @@ cmd

For example, change the command "...,P='DSN3EPX,-DBC1,S'" to "...,P=''DSN3EPX,-DBC1,S'' ".

- If the command contains any special characters ($, &, etc), they must be escaped using double backslashes like \\\\\\$.
+ If the command contains any special characters ($, &, etc), they must be escaped using double backslashes like \\\\$.

For example, to display job by job name the command would be ``cmd:"\\$dj''HELLO''"``

+ By default, the command will be converted to uppercase before execution; to control this behavior, see the *case_sensitive* option below.
+
| **required**: True
| **type**: str

@@ -56,13 +58,21 @@ wait_time_s

This option is helpful on a busy system requiring more time to execute commands.

- Setting \ :emphasis:`wait`\ can instruct if execution should wait the full \ :emphasis:`wait\_time\_s`\ .
+ Setting *wait* can instruct whether execution should wait the full *wait_time_s*.

| **required**: False
| **type**: int
| **default**: 1

+case_sensitive
+ If ``true``, the command will not be converted to uppercase before execution. Instead, the casing will be preserved just as it was written in a task.
+
+ | **required**: False
+ | **type**: bool
+ | **default**: False
+
+

Examples

@@ -100,7 +110,7 @@ Notes

-----

.. note::
- Commands may need to use specific prefixes like $, they can be discovered by issuing the following command \ :literal:`D OPDATA,PREFIX`\ .
+ Commands may need to use specific prefixes like $; they can be discovered by issuing the following command ``D OPDATA,PREFIX``.

diff --git a/docs/source/modules/zos_operator_action_query.rst b/docs/source/modules/zos_operator_action_query.rst
index b7956c8b8..ba9398b50 100644
--- a/docs/source/modules/zos_operator_action_query.rst
+++ b/docs/source/modules/zos_operator_action_query.rst
@@ -31,7 +31,7 @@ system

If the system name is not specified, all outstanding messages for that system and for the local systems attached to it are returned.

- A trailing asterisk, (\*) wildcard is supported.
+ A trailing asterisk (*) wildcard is supported.

| **required**: False
| **type**: str

@@ -42,7 +42,7 @@ message_id

If the message identifier is not specified, all outstanding messages for all message identifiers are returned.

- A trailing asterisk, (\*) wildcard is supported.
+ A trailing asterisk (*) wildcard is supported.

| **required**: False
| **type**: str

@@ -53,7 +53,7 @@ job_name

If the message job name is not specified, all outstanding messages for all job names are returned.

- A trailing asterisk, (\*) wildcard is supported.
+ A trailing asterisk (*) wildcard is supported.

| **required**: False
| **type**: str

@@ -69,24 +69,24 @@ message_filter

filter
- Specifies the substring or regex to match to the outstanding messages, see \ :emphasis:`use\_regex`\ .
+ Specifies the substring or regex to match to the outstanding messages; see *use_regex*.

All special characters in a filter string that are not a regex are escaped.

- Valid Python regular expressions are supported. See \ `the official documentation `__\ for more information.
+ Valid Python regular expressions are supported. See `the official documentation `_ for more information.

- Regular expressions are compiled with the flag \ :strong:`re.DOTALL`\ which makes the \ :strong:`'.'`\ special character match any character including a newline."
+ Regular expressions are compiled with the flag **re.DOTALL**, which makes the **'.'** special character match any character, including a newline.

| **required**: True
| **type**: str

use_regex
- Indicates that the value for \ :emphasis:`filter`\ is a regex or a string to match.
+ Indicates that the value for *filter* is a regex or a string to match.

- If False, the module assumes that \ :emphasis:`filter`\ is not a regex and matches the \ :emphasis:`filter`\ substring on the outstanding messages.
+ If False, the module assumes that *filter* is not a regex and matches the *filter* substring on the outstanding messages.

- If True, the module creates a regex from the \ :emphasis:`filter`\ string and matches it to the outstanding messages.
+ If True, the module creates a regex from the *filter* string and matches it to the outstanding messages.

| **required**: False
| **type**: bool

@@ -222,7 +222,7 @@ actions

| **sample**: STC01537

message_text
- Content of the outstanding message requiring operator action awaiting a reply. If \ :emphasis:`message\_filter`\ is set, \ :emphasis:`message\_text`\ will be filtered accordingly.
+ Content of the outstanding message requiring operator action awaiting a reply. If *message_filter* is set, *message_text* will be filtered accordingly.

| **returned**: success
| **type**: str

diff --git a/docs/source/modules/zos_ping.rst b/docs/source/modules/zos_ping.rst
index acb901790..a4405b473 100644
--- a/docs/source/modules/zos_ping.rst
+++ b/docs/source/modules/zos_ping.rst
@@ -16,9 +16,9 @@ zos_ping -- Ping z/OS and check dependencies.

Synopsis
--------
-- \ `zos\_ping <./zos_ping.html>`__\ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python.
-- \ `zos\_ping <./zos_ping.html>`__\ returns \ :literal:`pong`\ when the target host is not missing any required dependencies.
-- If the target host is missing optional dependencies, the \ `zos\_ping <./zos_ping.html>`__\ will return one or more warning messages.
+- `zos_ping <./zos_ping.html>`_ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python.
+- `zos_ping <./zos_ping.html>`_ returns ``pong`` when the target host is not missing any required dependencies.
+- If the target host is missing optional dependencies, the `zos_ping <./zos_ping.html>`_ module will return one or more warning messages.
- If a required dependency is missing from the target host, an explanatory message will be returned with the module failure.
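A minimal usage sketch of the dependency check described above (the task names and the registered variable name are illustrative)::

    - name: Verify dependencies on the managed z/OS node
      ibm.ibm_zos_core.zos_ping:
      register: ping_result

    - name: Display the full response from the check
      ansible.builtin.debug:
        var: ping_result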
@@ -44,7 +44,7 @@ Notes

-----

.. note::
- This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed nodes default encoding, eg IBM-1047. Starting with OpenSSH 9.0, it switches from SCP to use SFTP by default, meaning transfers are no longer treated as text and are transferred as binary preserving the source files encoding resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry \ :literal:`scp\_extra\_args="-O"`\ into the ini file named \ :literal:`ansible.cfg`\ .
+ This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed node's default encoding, e.g. IBM-1047. Starting with OpenSSH 9.0, SSH switches from SCP to SFTP by default, meaning transfers are no longer treated as text and are transferred as binary, preserving the source file's encoding and resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry ``scp_extra_args="-O"`` into the ini file named ``ansible.cfg``.

diff --git a/docs/source/modules/zos_script.rst b/docs/source/modules/zos_script.rst
index d2977c486..10660d38a 100644
--- a/docs/source/modules/zos_script.rst
+++ b/docs/source/modules/zos_script.rst
@@ -16,7 +16,7 @@ zos_script -- Run scripts in z/OS

Synopsis
--------
-- The \ `zos\_script <./zos_script.html>`__\ module runs a local or remote script in the remote machine.
+- The `zos_script <./zos_script.html>`_ module runs a local or remote script on the remote machine.

@@ -56,7 +56,7 @@ creates

encoding
Specifies which encodings the script should be converted from and to.

- If \ :literal:`encoding`\ is not provided, the module determines which local and remote charsets to convert the data from and to.
+ If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to.

| **required**: False
| **type**: dict

@@ -87,9 +87,9 @@ executable

remote_src
- If set to \ :literal:`false`\ , the module will search the script in the controller.
+ If set to ``false``, the module will search for the script on the controller.

- If set to \ :literal:`true`\ , the module will search the script in the remote machine.
+ If set to ``true``, the module will search for the script on the remote machine.

| **required**: False
| **type**: bool

@@ -103,13 +103,13 @@ removes

use_template
- Whether the module should treat \ :literal:`src`\ as a Jinja2 template and render it before continuing with the rest of the module.
+ Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module.

- Only valid when \ :literal:`src`\ is a local file or directory.
+ Only valid when ``src`` is a local file or directory.

- All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as \ `Ansible special variables `__\ , such as \ :literal:`playbook\_dir`\ , \ :literal:`ansible\_version`\ , etc.
+ All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc.

- If variables defined in different scopes share the same name, Ansible will apply variable precedence to them.
You can see the complete precedence order \ `in Ansible's documentation `__\ 
+ If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_

| **required**: False
| **type**: bool

@@ -119,9 +119,9 @@ template_parameters

Options to set the way Jinja2 will process templates.

- Jinja2 already sets defaults for the markers it uses, you can find more information at its \ `official documentation `__\ .
+ Jinja2 already sets defaults for the markers it uses; you can find more information at its `official documentation `_.

- These options are ignored unless \ :literal:`use\_template`\ is true.
+ These options are ignored unless ``use_template`` is true.

| **required**: False
| **type**: dict

@@ -200,7 +200,7 @@ template_parameters

trim_blocks
Whether Jinja2 should remove the first newline after a block is removed.

- Setting this option to \ :literal:`False`\ will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets.
+ Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets.

| **required**: False
| **type**: bool

@@ -290,7 +290,7 @@ Notes

.. note::
When executing local scripts, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file being copied.

- The location in the z/OS system where local scripts will be copied to can be configured through Ansible's \ :literal:`remote\_tmp`\ option. Refer to \ `Ansible's documentation `__\ for more information.
+ The location in the z/OS system where local scripts will be copied to can be configured through Ansible's ``remote_tmp`` option. Refer to `Ansible's documentation `_ for more information.

All local scripts copied to a remote z/OS system will be removed from the managed node before the module finishes executing.

@@ -298,13 +298,13 @@

The module will only add execution permissions for the file owner.

- If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error \ :literal:`BPXW0003I`\ .
+ If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error ``BPXW0003I``.

- For supported character sets used to encode data, refer to the \ `documentation `__\ .
+ For supported character sets used to encode data, refer to the `documentation `_.

- This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail.
+ This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported.
In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if not available, the module will fail.

- This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with \ `zos\_tso\_command <./zos_tso_command.html>`__\ .
+ This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with `zos_tso_command <./zos_tso_command.html>`_.

The community script module does not rely on Python to execute scripts on a managed node, while this module does. Python must be present on the remote machine.

diff --git a/docs/source/modules/zos_tso_command.rst b/docs/source/modules/zos_tso_command.rst
index b35c13a1b..4af6b1b52 100644
--- a/docs/source/modules/zos_tso_command.rst
+++ b/docs/source/modules/zos_tso_command.rst
@@ -40,7 +40,7 @@ commands

max_rc
Specifies the maximum return code allowed for a TSO command.

- If more than one TSO command is submitted, the \ :emphasis:`max\_rc`\ applies to all TSO commands.
+ If more than one TSO command is submitted, the *max_rc* applies to all TSO commands.

| **required**: False
| **type**: int

@@ -119,7 +119,7 @@ output

max_rc
Specifies the maximum return code allowed for a TSO command.

- If more than one TSO command is submitted, the \ :emphasis:`max\_rc`\ applies to all TSO commands.
+ If more than one TSO command is submitted, the *max_rc* applies to all TSO commands.

| **returned**: always
| **type**: int

diff --git a/docs/source/modules/zos_unarchive.rst b/docs/source/modules/zos_unarchive.rst
index ed6a26a8f..89b4b065c 100644
--- a/docs/source/modules/zos_unarchive.rst
+++ b/docs/source/modules/zos_unarchive.rst
@@ -16,8 +16,8 @@ zos_unarchive -- Unarchive files and data sets in z/OS.

Synopsis
--------
-- The \ :literal:`zos\_unarchive`\ module unpacks an archive after optionally transferring it to the remote system.
-- For supported archive formats, see option \ :literal:`format`\ .
+- The ``zos_unarchive`` module unpacks an archive after optionally transferring it to the remote system.
+- For supported archive formats, see option ``format``.
- Supported sources are USS (UNIX System Services) or z/OS data sets.
- Mixing MVS data sets with USS files for unarchiving is not supported.
- The archive is sent to the remote as binary, so no encoding is performed.

@@ -33,11 +33,13 @@ Parameters

src
The remote absolute path or data set of the archive to be uncompressed.

- \ :emphasis:`src`\ can be a USS file or MVS data set name.
+ *src* can be a USS file or MVS data set name.

USS file paths should be absolute paths.

- MVS data sets supported types are \ :literal:`SEQ`\ , \ :literal:`PDS`\ , \ :literal:`PDSE`\ .
+ Supported MVS data set types are ``SEQ``, ``PDS``, ``PDSE``.
+
+ GDS relative names are supported, e.g. ``USER.GDG(-1)``.

| **required**: True
| **type**: str

@@ -72,14 +74,14 @@ format

If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB

- When providing the \ :emphasis:`xmit\_log\_data\_set`\ name, ensure there is adequate space.
+ When providing the *xmit_log_data_set* name, ensure there is adequate space.
| **required**: False
| **type**: str

use_adrdssu
- If set to true, the \ :literal:`zos\_archive`\ module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using \ :literal:`xmit`\ or \ :literal:`terse`\ .
+ If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using ``xmit`` or ``terse``.

| **required**: False
| **type**: bool

@@ -87,7 +89,7 @@ format

dest_volumes
- When \ :emphasis:`use\_adrdssu=True`\ , specify the volume the data sets will be written to.
+ When *use_adrdssu=True*, specify the volume the data sets will be written to.

If no volume is specified, storage management rules will be used to determine the volume where the file will be unarchived.

@@ -103,7 +105,7 @@ dest

The remote absolute path or data set where the content should be unarchived to.

- \ :emphasis:`dest`\ can be a USS file, directory or MVS data set name.
+ *dest* can be a USS file, directory or MVS data set name.

If dest has missing parent directories, they will not be created.

@@ -116,7 +118,7 @@ group

When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership.

- This option is only applicable if \ :literal:`dest`\ is USS, otherwise ignored.
+ This option is only applicable if ``dest`` is USS, otherwise ignored.

| **required**: False
| **type**: str

@@ -125,13 +127,13 @@ group

mode
The permission of the uncompressed files.

- If \ :literal:`dest`\ is USS, this will act as Unix file mode, otherwise ignored.
+ If ``dest`` is USS, this will act as Unix file mode, otherwise ignored.

- It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like \ :literal:`0644`\ or \ :literal:`01777`\ )or quote it (like \ :literal:`'644'`\ or \ :literal:`'1777'`\ ) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results.
+ It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``) or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results.

- The mode may also be specified as a symbolic mode (for example, \`\`u+rwx\`\` or \`\`u=rw,g=r,o=r\`\`) or a special string \`preserve\`.
+ The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string ``preserve``.

- \ :emphasis:`mode=preserve`\ means that the file will be given the same permissions as the source file.
+ *mode=preserve* means that the file will be given the same permissions as the source file.

| **required**: False
| **type**: str

@@ -149,7 +151,9 @@ owner

include
A list of directories, files or data set names to extract from the archive.

- When \ :literal:`include`\ is set, only those files will we be extracted leaving the remaining files in the archive.
+ GDS relative names are supported, e.g. ``USER.GDG(-1)``.
+ + When ``include`` is set, only those files will be extracted, leaving the remaining files in the archive. Mutually exclusive with exclude. @@ -161,6 +165,8 @@ include exclude List the directory and file or data set names that you would like to exclude from the unarchive action. + GDS relative names are supported, e.g. ``USER.GDG(-1)``. + Mutually exclusive with include. | **required**: False @@ -177,7 +183,7 @@ list dest_data_set - Data set attributes to customize a \ :literal:`dest`\ data set that the archive will be copied into. + Data set attributes to customize a ``dest`` data set that the archive will be copied into. | **required**: False | **type**: dict @@ -200,18 +206,18 @@ dest_data_set space_primary - If the destination \ :emphasis:`dest`\ data set does not exist , this sets the primary space allocated for the data set. + If the destination *dest* data set does not exist, this sets the primary space allocated for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ . + The unit of space used is set using *space_type*. | **required**: False | **type**: int space_secondary - If the destination \ :emphasis:`dest`\ data set does not exist , this sets the secondary space allocated for the data set. + If the destination *dest* data set does not exist, this sets the secondary space allocated for the data set. - The unit of space used is set using \ :emphasis:`space\_type`\ . + The unit of space used is set using *space_type*. | **required**: False | **type**: int @@ -220,7 +226,7 @@ dest_data_set space_type If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - Valid units of size are \ :literal:`k`\ , \ :literal:`m`\ , \ :literal:`g`\ , \ :literal:`cyl`\ , and \ :literal:`trk`\ . + Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. | **required**: False | **type**: str @@ -228,7 +234,7 @@ dest_data_set record_format - If the destination data set does not exist, this sets the format of the data set. (e.g \ :literal:`fb`\ ) + If the destination data set does not exist, this sets the format of the data set. (e.g. ``fb``) Choices are case-sensitive. @@ -265,9 +271,9 @@ dest_data_set key_offset The key offset to use when creating a KSDS data set. - \ :emphasis:`key\_offset`\ is required when \ :emphasis:`type=ksds`\ . + *key_offset* is required when *type=ksds*. - \ :emphasis:`key\_offset`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_offset* should only be provided when *type=ksds*. | **required**: False | **type**: int @@ -276,9 +282,9 @@ dest_data_set key_length The key length to use when creating a KSDS data set. - \ :emphasis:`key\_length`\ is required when \ :emphasis:`type=ksds`\ . + *key_length* is required when *type=ksds*. - \ :emphasis:`key\_length`\ should only be provided when \ :emphasis:`type=ksds`\ + *key_length* should only be provided when *type=ksds*. | **required**: False | **type**: int @@ -327,7 +333,7 @@ dest_data_set tmp_hlq Override the default high level qualifier (HLQ) for temporary data sets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value \ :literal:`TMPHLQ`\ is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used.
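A minimal, hypothetical sketch of overriding the HLQ for temporary data sets (the archive name and ``TMPUSR`` qualifier are illustrative assumptions)::

   # Temporary data sets created during the unarchive will be
   # allocated under the TMPUSR high level qualifier.
   - name: Unarchive a terse data set with an explicit temporary HLQ
     zos_unarchive:
       src: "USER.ARCHIVE.RESULT.TRS"
       format:
         name: terse
       tmp_hlq: "TMPUSR"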
| **required**: False | **type**: str @@ -342,9 +348,9 @@ force remote_src - If set to true, \ :literal:`zos\_unarchive`\ retrieves the archive from the remote system. + If set to true, ``zos_unarchive`` retrieves the archive from the remote system. - If set to false, \ :literal:`zos\_unarchive`\ searches the local machine (Ansible controller) for the archive. + If set to false, ``zos_unarchive`` searches the local machine (Ansible controller) for the archive. | **required**: False | **type**: bool @@ -385,6 +391,13 @@ Examples - USER.ARCHIVE.TEST1 - USER.ARCHIVE.TEST2 + # Unarchive a GDS + - name: Unarchive a terse archive stored in a generation data set. + zos_unarchive: + src: "USER.ARCHIVE(0)" + format: + name: terse + # List option - name: List content from XMIT zos_unarchive: @@ -404,7 +417,7 @@ Notes .. note:: VSAMs are not supported. - This module uses \ `zos\_copy <./zos_copy.html>`__\ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + This module uses `zos_copy <./zos_copy.html>`_ to copy local archives to the remote machine, which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if SFTP is not available, the module will fail. diff --git a/docs/source/modules/zos_volume_init.rst b/docs/source/modules/zos_volume_init.rst index a2b6f25ab..5647ad998 100644 --- a/docs/source/modules/zos_volume_init.rst +++ b/docs/source/modules/zos_volume_init.rst @@ -17,14 +17,14 @@ zos_volume_init -- Initialize volumes or minidisks. Synopsis -------- - Initialize a volume or minidisk on z/OS. -- \ :emphasis:`zos\_volume\_init`\ will create the volume label and entry into the volume table of contents (VTOC). +- *zos_volume_init* will create the volume label and entry into the volume table of contents (VTOC). - Volumes are used for storing data and executable programs. - A minidisk is a portion of a disk that is linked to your virtual machine. - A VTOC lists the data sets that reside on a volume, their location, size, and other attributes. -- \ :emphasis:`zos\_volume\_init`\ uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class \`STGADMIN.ICK.INIT\`. Protection occurs when the class is active, and the class profile is defined. Ensure the user executing the Ansible task is permitted to execute ICKDSF command INIT, otherwise, any user can use the command. -- ICKDSF is an Authorized Program Facility (APF) program on z/OS, \ :emphasis:`zos\_volume\_init`\ will run in authorized mode but if the program ICKDSF is not APF authorized, the task will end. +- *zos_volume_init* uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class ``STGADMIN.ICK.INIT``. Protection occurs when the class is active, and the class profile is defined.
Ensure the user executing the Ansible task is permitted to execute ICKDSF command INIT; otherwise, any user can use the command. +- ICKDSF is an Authorized Program Facility (APF) program on z/OS; *zos_volume_init* will run in authorized mode, but if the program ICKDSF is not APF authorized, the task will end. - Note that defaults set on target z/OS systems may override ICKDSF parameters. -- If is recommended that data on the volume is backed up as the \ :emphasis:`zos\_volume\_init`\ module will not perform any backups. You can use the \ `zos\_backup\_restore <./zos_backup_restore.html>`__\ module to backup a volume. +- It is recommended that data on the volume is backed up, as the *zos_volume_init* module will not perform any backups. You can use the `zos_backup_restore <./zos_backup_restore.html>`_ module to back up a volume. @@ -35,9 +35,9 @@ Parameters address - \ :emphasis:`address`\ is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk. + *address* is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk. - \ :emphasis:`address`\ can be the number assigned to the device (device number) when it is installed or the virtual address. + *address* can be the number assigned to the device (device number) when it is installed or the virtual address. | **required**: True | **type**: str @@ -46,15 +46,15 @@ address verify_volid Verify that the volume serial matches what is on the existing volume or minidisk. - \ :emphasis:`verify\_volid`\ must be 1 to 6 alphanumeric characters or \ :literal:`\*NONE\*`\ . + *verify_volid* must be 1 to 6 alphanumeric characters or ``*NONE*``. - To verify that a volume serial number does not exist, use \ :emphasis:`verify\_volid=\*NONE\*`\ . + To verify that a volume serial number does not exist, use ``verify_volid=*NONE*``. - If \ :emphasis:`verify\_volid`\ is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete. + If *verify_volid* is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete. - If \ :emphasis:`verify\_volid=\*NONE\*`\ is specified and a volume serial is found on the volume or minidisk, initialization does not complete. + If ``verify_volid=*NONE*`` is specified and a volume serial is found on the volume or minidisk, initialization does not complete. - Note, this option is \ :strong:`not`\ a boolean, leave it blank to skip the verification. + Note, this option is **not** a boolean; leave it blank to skip the verification. | **required**: False | **type**: str @@ -73,11 +73,11 @@ volid Expects 1-6 alphanumeric, national ($,#,@) or special characters. - A \ :emphasis:`volid`\ with less than 6 characters will be padded with spaces. + A *volid* with less than 6 characters will be padded with spaces. - A \ :emphasis:`volid`\ can also be referred to as volser or volume serial number. + A *volid* can also be referred to as volser or volume serial number. - When \ :emphasis:`volid`\ is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. + When *volid* is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. | **required**: False | **type**: str @@ -99,7 +99,7 @@ index The VTOC index enhances the performance of VTOC access. - When set to \ :emphasis:`false`\ , no index will be created. + When set to *false*, no index will be created.
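As a hedged sketch only (the device address and volume serial below are hypothetical), initializing a volume without a VTOC index might look like::

   # A non-SMS volume; index is disabled explicitly, so only the
   # volume label and VTOC are written.
   - name: Initialize a volume without a VTOC index
     zos_volume_init:
       address: "1234"
       volid: "DEMO01"
       index: false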
| **required**: False | **type**: bool @@ -109,7 +109,7 @@ index sms_managed Specifies that the volume be managed by Storage Management System (SMS). - If \ :emphasis:`sms\_managed`\ is \ :emphasis:`true`\ then \ :emphasis:`index`\ must also be \ :emphasis:`true`\ . + If *sms_managed* is *true*, then *index* must also be *true*. | **required**: False | **type**: bool @@ -127,7 +127,7 @@ verify_volume_empty tmp_hlq Override the default high level qualifier (HLQ) for temporary and backup datasets. - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value \ :literal:`TMPHLQ`\ is used. + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. | **required**: False | **type**: str diff --git a/docs/source/release_notes.rst b/docs/source/release_notes.rst index c8c2f6e96..45f3f100a 100644 --- a/docs/source/release_notes.rst +++ b/docs/source/release_notes.rst @@ -6,6 +6,110 @@ Releases ======== +Version 1.11.0-beta.1 +===================== + +Minor Changes +------------- + +- ``zos_apf`` - Added support that auto-escapes 'library' names containing symbols. +- ``zos_archive`` - Added support for GDG and GDS relative name notation to archive data sets. Added support for data set names with special characters like $, /#, /- and @. +- ``zos_backup_restore`` - Added support for GDS relative name notation to include or exclude data sets when operation is backup. Added support for data set names with special characters like $, /#, and @. +- ``zos_blockinfile`` - Added support for GDG and GDS relative name notation to specify a data set and to back up into new generations. Added support for data set names with special characters like $, /#, /- and @. +- ``zos_copy`` - Added support for copying from and copying to generation data sets (GDS) and generation data groups (GDG) including using a GDS for backup. +- ``zos_data_set`` - Added support for GDG and GDS relative name notation to create, delete, catalog and uncatalog a data set. Added support for data set names with special characters like $, /#, /- and @. +- ``zos_encode`` - Added support for converting the encodings of generation data sets (GDS). Also added support for backing up into a GDS. +- ``zos_fetch`` - Added support for fetching generation data groups (GDG) and generation data sets (GDS). Added support for specifying data set names with special characters like $, /#, /- and @. +- ``zos_find`` - Added support for finding generation data groups (GDG) and generation data sets (GDS). Added support for specifying data set names with special characters like $, /#, /- and @. +- ``zos_job_submit`` + + - Improved the mechanism for copying to remote systems by removing the use of deepcopy, which had previously resulted in the module failing on some systems. + - Added support for running JCL stored in generation data groups (GDG) and generation data sets (GDS). + +- ``zos_lineinfile`` - Added support for GDG and GDS relative name notation to specify the target data set and to back up into new generations. Added support for data set names with special characters like $, /#, /- and @. +- ``zos_mount`` - Added support for data set names with special characters ($, /#, /- and @). +- ``zos_mvs_raw`` - Added support for GDG and GDS relative name notation to specify data set names. Added support for data set names with special characters like $, /#, /- and @.
+- ``zos_script`` - Improved the mechanism for copying to remote systems by removing the use of deepcopy, which had previously resulted in the module failing on some systems. +- ``zos_tso_command`` - Added support for using GDG and GDS relative name notation in running TSO commands. Added support for data set names with special characters like $, /#, /- and @. +- ``zos_unarchive`` + + - Added support for data set names with special characters like $, /#, /- and @. + - Improved the mechanism for copying to remote systems by removing the use of deepcopy, which had previously resulted in the module failing on some systems. + +Bugfixes +-------- + +- ``zos_copy`` + + - a regression in version 1.4.0 made the module stop automatically computing member names when copying a single file into a PDS/E. Fix now lets a user copy a single file into a PDS/E without adding a member in the dest option. + - module would use opercmd to check if a non-existent destination data set is locked. Fix now only checks if the destination is already present. + +- ``zos_data_set`` - When checking if a data set is cataloged, module failed to account for exceptions which occurred during the LISTCAT. The fix now raises an MVSCmdExecError if the return code from LISTCAT is too high. +- ``zos_job_submit`` - The module was not propagating any error types including UnicodeDecodeError, JSONDecodeError, TypeError, KeyError when encountered. The fix now includes the error type in the error message. +- ``zos_mvs_raw`` - The first character of each line in dd_output was missing. The fix now includes the first character of each line. + +Availability +------------ + +* `Galaxy`_ +* `GitHub`_ + +Requirements +------------ + +The IBM z/OS core collection has several dependencies; please review the `z/OS core support matrix`_ to understand both the +controller and z/OS managed node dependencies. + +Known Issues +------------ +- ``zos_job_submit`` - when setting 'location' to 'local' and not specifying the from and to encoding, the module's defaults are not read, leaving the file in its original encoding; explicitly set the encodings instead of relying on the default. +- ``zos_job_submit`` - when submitting JCL, the response value returned for **byte_count** is incorrect. +- ``zos_apf`` - When trying to remove a library that contains the '$' character in the name from APF (authorized program facility), the operation will fail. +- In the past, choices could be defined in either lower or upper case. Now, only the case that is identified in the docs can be set; this is so that the collection can continue to maintain certified status. + + +Version 1.9.2 +============= + +Bugfixes +-------- + +- ``zos_copy`` - when creating the destination data set, the module would unnecessarily check if a data set is locked by another process. The module no longer performs this check when it creates the data set. + +Availability +------------ + +* `Automation Hub`_ +* `Galaxy`_ +* `GitHub`_ + +Requirements +------------ + +The IBM z/OS core collection has several dependencies; please review the `z/OS core support matrix`_ to understand both the +controller and z/OS managed node dependencies. + +Known Issues +------------ + +- ``zos_job_submit`` - when setting 'location' to 'LOCAL' and not specifying the from and to encoding, the module's defaults are not read, leaving the file in its original encoding; explicitly set the encodings instead of relying on the default.
+- ``zos_job_submit`` - when submitting JCL, the response value returned for **byte_count** is incorrect. + +- ``zos_job_submit``, ``zos_job_output``, ``zos_operator_action_query`` - encounter UTF-8 decoding errors when interacting with results that contain non-printable UTF-8 characters in the response. This has been addressed in this release and corrected with **ZOAU version 1.2.5.6** or later. + + - If the appropriate level of ZOAU cannot be installed, some options are to: + + - Specify that the ASA assembler option be enabled to instruct the assembler to use ANSI control characters instead of machine code control characters. + - Ignore module errors by using **ignore_errors:true** for a specific playbook task. + - If the error results from a batch job, add **ignore_errors:true** to the task and capture the output into a registered variable to extract the + job ID with a regular expression. Then use ``zos_job_output`` to display the DD without the non-printable characters, such as the DD **JESMSGLG**. + - If the error is the result of a batch job, set option **return_output** to false so that no DDs are read which could contain the non-printable UTF-8 characters. + +- ``zos_data_set`` - An undocumented option **size** was defined in module **zos_data_set**; this has been removed to satisfy collection certification. Use the intended and documented **space_primary** option. + +- In the past, choices could be defined in either lower or upper case. Now, only the case that is identified in the docs can be set; this is so that the collection can continue to maintain certified status. + + Version 1.10.0 ============== @@ -134,19 +238,6 @@ Bugfixes - ``zos_find`` - Option size failed if a PDS/E matched the pattern, now filtering on utilized size for a PDS/E is supported. - ``zos_mvs_raw`` - Option **tmp_hlq** when creating temporary data sets was previously ignored, now the option honors the High Level Qualifier for temporary data sets created during the module execution. -Availability ------------- - -* `Automation Hub`_ -* `Galaxy`_ -* `GitHub`_ - -Requirements ------------- - -The IBM z/OS core collection has several dependencies, please review the `z/OS core support matrix`_ to understand both the -controller and z/OS managed node dependencies. - Known Issues ------------ @@ -165,7 +256,18 @@ Known Issues - ``zos_data_set`` - An undocumented option **size** was defined in module **zos_data_set**, this has been removed to satisfy collection certification, use the intended and documented **space_primary** option. -- In the past, choices could be defined in either lower or upper case. Now, only the case that is identified in the docs can be set, this is so that the collection can continue to maintain certified status. +Availability +------------ + +* `Automation Hub`_ +* `Galaxy`_ +* `GitHub`_ + +Requirements +------------ + +The IBM z/OS core collection has several dependencies; please review the `z/OS core support matrix`_ to understand both the +controller and z/OS managed node dependencies.
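The batch-job workaround described in the Known Issues above could be sketched as follows; the JCL member name, the regular expression, and the registered field used to recover the job ID are illustrative assumptions, not verified output::

   # Tolerate a UTF-8 decoding failure from the submit, then read only
   # the JESMSGLG DD, which avoids the non-printable characters.
   - name: Submit JCL that may produce non-printable UTF-8 output
     zos_job_submit:
       src: USER.TEST.JCL(SAMPLE)
       location: data_set
     register: submit_result
     ignore_errors: true

   - name: Display only the JESMSGLG DD for the submitted job
     zos_job_output:
       job_id: "{{ submit_result.msg | regex_search('JOB[0-9]+') }}"
       ddname: JESMSGLG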
Version 1.9.0 ============= diff --git a/docs/source/resources/releases_maintenance.rst b/docs/source/resources/releases_maintenance.rst index 391456769..9a5adbce8 100644 --- a/docs/source/resources/releases_maintenance.rst +++ b/docs/source/resources/releases_maintenance.rst @@ -89,6 +89,11 @@ The z/OS managed node includes several shells, currently the only supported shel +---------+----------------------------+---------------------------------------------------+---------------+---------------+ | Version | Controller | Managed Node | GA | End of Life | +=========+============================+===================================================+===============+===============+ +| 1.11.x |- `ansible-core`_ >=2.15.x |- `z/OS`_ V2R4 - V3Rx | In preview | TBD | +| |- `Ansible`_ >=8.0.x |- `z/OS shell`_ | | | +| |- `AAP`_ >=2.4 |- IBM `Open Enterprise SDK for Python`_ | | | +| | |- IBM `Z Open Automation Utilities`_ >=1.3.1 | | | ++---------+----------------------------+---------------------------------------------------+---------------+---------------+ | 1.10.x |- `ansible-core`_ >=2.15.x |- `z/OS`_ V2R4 - V2Rx | 21 June 2024 | 21 June 2026 | | |- `Ansible`_ >=8.0.x |- `z/OS shell`_ | | | | |- `AAP`_ >=2.4 |- IBM `Open Enterprise SDK for Python`_ | | | diff --git a/galaxy.yml b/galaxy.yml index 2e9d280dc..910442ef8 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -6,7 +6,7 @@ namespace: ibm name: ibm_zos_core # The collection version -version: "1.10.0" +version: "1.11.0-beta.1" # Collection README file readme: README.md diff --git a/meta/ibm_zos_core_meta.yml b/meta/ibm_zos_core_meta.yml index 5bc58ec94..16ee31ca9 100644 --- a/meta/ibm_zos_core_meta.yml +++ b/meta/ibm_zos_core_meta.yml @@ -1,5 +1,5 @@ name: ibm_zos_core -version: "1.10.0" +version: "1.11.0-beta.1" managed_requirements: - name: "IBM Open Enterprise SDK for Python" @@ -7,4 +7,4 @@ managed_requirements: - name: "Z Open Automation Utilities" version: - - ">=1.3.0" + - ">=1.3.1" diff --git a/plugins/action/zos_fetch.py b/plugins/action/zos_fetch.py index c3e4ec1ee..4d0a0c11b 100644 --- a/plugins/action/zos_fetch.py +++ b/plugins/action/zos_fetch.py @@ -276,7 +276,7 @@ def run(self, tmp=None, task_vars=None): local_checksum = _get_file_checksum(dest) # ********************************************************** # - # Fetch remote data. + # Fetch remote data. # # ********************************************************** # try: if ds_type in SUPPORTED_DS_TYPES: diff --git a/plugins/action/zos_script.py b/plugins/action/zos_script.py index e481052a5..d51c48ddf 100644 --- a/plugins/action/zos_script.py +++ b/plugins/action/zos_script.py @@ -1,4 +1,4 @@ -# Copyright (c) IBM Corporation 2023 +# Copyright (c) IBM Corporation 2023, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/plugins/doc_fragments/template.py b/plugins/doc_fragments/template.py index 1eea4ad3d..2215c0a4a 100644 --- a/plugins/doc_fragments/template.py +++ b/plugins/doc_fragments/template.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) IBM Corporation 2022, 2023 +# Copyright (c) IBM Corporation 2022, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/plugins/doc_fragments/template.py-e b/plugins/doc_fragments/template.py-e new file mode 100644 index 000000000..af96f7b9d --- /dev/null +++ b/plugins/doc_fragments/template.py-e @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) IBM Corporation 2022, 2024 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r''' +options: + use_template: + description: + - Whether the module should treat C(src) as a Jinja2 template and + render it before continuing with the rest of the module. + - Only valid when C(src) is a local file or directory. + - All variables defined in inventory files, vars files and the playbook + will be passed to the template engine, + as well as L(Ansible special variables,https://docs.ansible.com/ansible/latest/reference_appendices/special_variables.html#special-variables), + such as C(playbook_dir), C(ansible_version), etc. + - If variables defined in different scopes share the same name, Ansible will + apply variable precedence to them. You can see the complete precedence order + L(in Ansible's documentation,https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence) + type: bool + default: false + template_parameters: + description: + - Options to set the way Jinja2 will process templates. + - Jinja2 already sets defaults for the markers it uses, you can find more + information at its L(official documentation,https://jinja.palletsprojects.com/en/latest/templates/). + - These options are ignored unless C(use_template) is true. + required: false + type: dict + suboptions: + variable_start_string: + description: + - Marker for the beginning of a statement to print a variable in Jinja2. + type: str + default: '{{' + variable_end_string: + description: + - Marker for the end of a statement to print a variable in Jinja2. + type: str + default: '}}' + block_start_string: + description: + - Marker for the beginning of a block in Jinja2. + type: str + default: '{%' + block_end_string: + description: + - Marker for the end of a block in Jinja2. + type: str + default: '%}' + comment_start_string: + description: + - Marker for the beginning of a comment in Jinja2. + type: str + default: '{#' + comment_end_string: + description: + - Marker for the end of a comment in Jinja2. + type: str + default: '#}' + line_statement_prefix: + description: + - Prefix used by Jinja2 to identify line-based statements. + type: str + required: false + line_comment_prefix: + description: + - Prefix used by Jinja2 to identify comment lines. + type: str + required: false + lstrip_blocks: + description: + - Whether Jinja2 should strip leading spaces from the start of a line + to a block. + type: bool + default: false + trim_blocks: + description: + - Whether Jinja2 should remove the first newline after a block is removed. 
+ - Setting this option to C(False) will result in newlines being added to + the rendered template. This could create invalid code when working with + JCL templates or empty records in destination data sets. + type: bool + default: true + keep_trailing_newline: + description: + - Whether Jinja2 should keep the first trailing newline at the end of a + template after rendering. + type: bool + default: false + newline_sequence: + description: + - Sequence that starts a newline in a template. + type: str + default: '\\n' + choices: + - '\\n' + - '\\r' + - "\r\n" + auto_reload: + description: + - Whether to reload a template file when it has changed after the task + has started. + type: bool + default: false +''' diff --git a/plugins/module_utils/backup.py b/plugins/module_utils/backup.py index 8499361b6..716e0d3b2 100644 --- a/plugins/module_utils/backup.py +++ b/plugins/module_utils/backup.py @@ -139,7 +139,10 @@ def mvs_file_backup(dsn, bk_dsn=None, tmphlq=None): rc, out, err = _copy_pds(dsn, bk_dsn) if rc != 0: raise BackupError( - "Unable to backup data set {0} to {1}".format(dsn, bk_dsn) + "Unable to backup data set {0} to {1}.".format(dsn, bk_dsn), + rc=rc, + stdout=out, + stderr=err ) return bk_dsn diff --git a/plugins/module_utils/data_set.py b/plugins/module_utils/data_set.py index d3d8123c3..7b81fe2d1 100644 --- a/plugins/module_utils/data_set.py +++ b/plugins/module_utils/data_set.py @@ -1999,24 +1999,24 @@ def create(self, tmp_hlq=None, replace=True, force=False): Indicates if changes were made. """ arguments = { - "name" : self.name, - "raw_name" : self.raw_name, - "type" : self.data_set_type, - "space_primary" : self.space_primary, - "space_secondary" : self.space_secondary, - "space_type" : self.space_type, - "record_format" : self.record_format, - "record_length" : self.record_length, - "block_size" : self.block_size, - "directory_blocks" : self.directory_blocks, - "key_length" : self.key_length, - "key_offset" : self.key_offset, - "sms_storage_class" : self.sms_storage_class, - "sms_data_class" : self.sms_data_class, - "sms_management_class" : self.sms_management_class, - "volumes" : self.volumes, - "tmp_hlq" : tmp_hlq, - "force" : force, + "name": self.name, + "raw_name": self.raw_name, + "type": self.data_set_type, + "space_primary": self.space_primary, + "space_secondary": self.space_secondary, + "space_type": self.space_type, + "record_format": self.record_format, + "record_length": self.record_length, + "block_size": self.block_size, + "directory_blocks": self.directory_blocks, + "key_length": self.key_length, + "key_offset": self.key_offset, + "sms_storage_class": self.sms_storage_class, + "sms_data_class": self.sms_data_class, + "sms_management_class": self.sms_management_class, + "volumes": self.volumes, + "tmp_hlq": tmp_hlq, + "force": force, } formatted_args = DataSet._build_zoau_args(**arguments) changed = False @@ -2048,25 +2048,25 @@ def ensure_present(self, tmp_hlq=None, replace=False, force=False): Indicates if changes were made. 
""" arguments = { - "name" : self.name, - "raw_name" : self.raw_name, - "type" : self.data_set_type, - "space_primary" : self.space_primary, - "space_secondary" : self.space_secondary, - "space_type" : self.space_type, - "record_format" : self.record_format, - "record_length" : self.record_length, - "block_size" : self.block_size, - "directory_blocks" : self.directory_blocks, - "key_length" : self.key_length, - "key_offset" : self.key_offset, - "sms_storage_class" : self.sms_storage_class, - "sms_data_class" : self.sms_data_class, - "sms_management_class" : self.sms_management_class, - "volumes" : self.volumes, - "replace" : replace, - "tmp_hlq" : tmp_hlq, - "force" : force, + "name": self.name, + "raw_name": self.raw_name, + "type": self.data_set_type, + "space_primary": self.space_primary, + "space_secondary": self.space_secondary, + "space_type": self.space_type, + "record_format": self.record_format, + "record_length": self.record_length, + "block_size": self.block_size, + "directory_blocks": self.directory_blocks, + "key_length": self.key_length, + "key_offset": self.key_offset, + "sms_storage_class": self.sms_storage_class, + "sms_data_class": self.sms_data_class, + "sms_management_class": self.sms_management_class, + "volumes": self.volumes, + "replace": replace, + "tmp_hlq": tmp_hlq, + "force": force, } rc = DataSet.ensure_present(**arguments) self.set_state("present") diff --git a/plugins/module_utils/vtoc.py b/plugins/module_utils/vtoc.py index 309d73c1e..3cae4fd92 100644 --- a/plugins/module_utils/vtoc.py +++ b/plugins/module_utils/vtoc.py @@ -1,4 +1,4 @@ -# Copyright (c) IBM Corporation 2020 +# Copyright (c) IBM Corporation 2020, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/plugins/modules/zos_apf.py b/plugins/modules/zos_apf.py index 024ef8baa..ceeea04de 100644 --- a/plugins/modules/zos_apf.py +++ b/plugins/modules/zos_apf.py @@ -292,6 +292,7 @@ type: str ''' +import os import re import json from ansible.module_utils._text import to_text @@ -312,7 +313,7 @@ # supported data set types -DS_TYPE = ['PS', 'PO'] +DS_TYPE = data_set.DataSet.MVS_SEQ.union(data_set.DataSet.MVS_PARTITIONED) def backupOper(module, src, backup, tmphlq=None): @@ -340,11 +341,15 @@ def backupOper(module, src, backup, tmphlq=None): fail_json Creating backup has failed. """ - # analysis the file type - ds_utils = data_set.DataSetUtils(src) - file_type = ds_utils.ds_type() + file_type = None + if data_set.is_data_set(src): + file_type = data_set.DataSet.data_set_type(src) + else: + if os.path.exists(src): + file_type = 'USS' + if file_type != 'USS' and file_type not in DS_TYPE: - message = "{0} data set type is NOT supported".format(str(file_type)) + message = "Dataset {0} of type {1} is NOT supported".format(src, str(file_type)) module.fail_json(msg=message) # backup can be True(bool) or none-zero length string. string indicates that backup_name was provided. @@ -357,8 +362,17 @@ def backupOper(module, src, backup, tmphlq=None): backup_name = Backup.uss_file_backup(src, backup_name=backup, compress=False) else: backup_name = Backup.mvs_file_backup(dsn=src, bk_dsn=backup, tmphlq=tmphlq) + except Backup.BackupError as exc: + module.fail_json( + msg=exc.msg, + rc=exc.rc, + stdout=exc.stdout, + stderr=exc.stderr + ) except Exception: - module.fail_json(msg="creating backup has failed") + module.fail_json( + msg="An error ocurred during backup." 
+ ) return backup_name diff --git a/plugins/modules/zos_archive.py b/plugins/modules/zos_archive.py index b9c825902..52fdd9585 100644 --- a/plugins/modules/zos_archive.py +++ b/plugins/modules/zos_archive.py @@ -380,7 +380,7 @@ format: name: terse format_options: - use_adrdssu: True + use_adrdssu: true - name: Archive multiple data sets into a new GDS zos_archive: @@ -389,7 +389,7 @@ format: name: terse format_options: - use_adrdssu: True + use_adrdssu: true ''' RETURN = r''' diff --git a/plugins/modules/zos_blockinfile.py b/plugins/modules/zos_blockinfile.py index a5fd05f45..ab6d2a0dd 100644 --- a/plugins/modules/zos_blockinfile.py +++ b/plugins/modules/zos_blockinfile.py @@ -39,7 +39,7 @@ PS (sequential data set), member of a PDS or PDSE, PDS, PDSE. - The USS file must be an absolute pathname. - Generation data set (GDS) relative name of generation already - created. C(e.g. SOME.CREATION(-1).) + created. ``e.g. SOME.CREATION(-1).`` type: str aliases: [ path, destfile, name ] required: true @@ -293,7 +293,7 @@ zos_blockinfile: src: SOME.CREATION.TEST insertbefore: BOF - backup: True + backup: true backup_name: CREATION.GDS(+1) block: "{{ CONTENT }}" ''' diff --git a/plugins/modules/zos_encode.py b/plugins/modules/zos_encode.py index 40b70a0fd..a17fcb7ed 100644 --- a/plugins/modules/zos_encode.py +++ b/plugins/modules/zos_encode.py @@ -616,7 +616,7 @@ def run_module(): result["dest"] = dest if ds_type_dest == "GDG": - raise EncodeError("Encoding of a whole generation data group is not yet supported.") + raise EncodeError("Encoding of a whole generation data group is not supported.") new_src = src_data_set.name if src_data_set else src new_dest = dest_data_set.name if dest_data_set else dest diff --git a/plugins/modules/zos_find.py b/plugins/modules/zos_find.py index 4bea0539d..e45595133 100644 --- a/plugins/modules/zos_find.py +++ b/plugins/modules/zos_find.py @@ -234,7 +234,6 @@ limit: 30 scratch: true purge: true - """ diff --git a/plugins/modules/zos_job_submit.py b/plugins/modules/zos_job_submit.py index e6e191060..d91b511c3 100644 --- a/plugins/modules/zos_job_submit.py +++ b/plugins/modules/zos_job_submit.py @@ -36,9 +36,9 @@ description: - The source file or data set containing the JCL to submit. - It could be a physical sequential data set, a partitioned data set - qualified by a member or a path (e.g. C(USER.TEST), V(USER.JCL(TEST\))), + qualified by a member or a path (e.g. C(USER.TEST), ``USER.JCL(TEST)``), or a generation data set from a generation data group - (for example, V(USER.TEST.GDG(-2\))). + (for example, ``USER.TEST.GDG(-2)``). - Or a USS file. (e.g C(/u/tester/demo/sample.jcl)) - Or a LOCAL file in ansible control node. (e.g C(/User/tester/ansible-playbook/sample.jcl)) diff --git a/plugins/modules/zos_lineinfile.py b/plugins/modules/zos_lineinfile.py index d3aa3b6b6..c5f262fe0 100644 --- a/plugins/modules/zos_lineinfile.py +++ b/plugins/modules/zos_lineinfile.py @@ -37,7 +37,7 @@ PS (sequential data set), member of a PDS or PDSE, PDS, PDSE. - The USS file must be an absolute pathname. - Generation data set (GDS) relative name of generation already - created. C(e.g. SOME.CREATION(-1).) + created. ``e.g. 
SOME.CREATION(-1).`` type: str aliases: [ path, destfile, name ] required: true @@ -251,7 +251,7 @@ zos_lineinfile: src: SOME.CREATION.TEST insertafter: EOF - backup: True + backup: true backup_name: CREATION.GDS(+1) line: 'Should be a working test now' """ diff --git a/plugins/modules/zos_mvs_raw.py b/plugins/modules/zos_mvs_raw.py index e3c8d4c6d..0a9394b67 100644 --- a/plugins/modules/zos_mvs_raw.py +++ b/plugins/modules/zos_mvs_raw.py @@ -89,7 +89,7 @@ description: - The data set name. - A data set name can be a GDS relative name. - - When using GDS relative name and it is a positive generation, disposition new must be used. + - When using GDS relative name and it is a positive generation, I(disposition=new) must be used. type: str required: false type: @@ -708,7 +708,7 @@ description: - The data set name. - A data set name can be a GDS relative name. - - When using GDS relative name and it is a positive generation, disposition new must be used. + - When using GDS relative name and it is a positive generation, I(disposition=new) must be used. type: str required: false type: diff --git a/plugins/modules/zos_operator.py b/plugins/modules/zos_operator.py index 54817936d..b2d8c0c52 100644 --- a/plugins/modules/zos_operator.py +++ b/plugins/modules/zos_operator.py @@ -29,6 +29,7 @@ - "Demetrios Dimatos (@ddimatos)" - "Rich Parker (@richp405)" - "Oscar Fernando Flores (@fernandofloresg)" + - "Ivan Moreno (@rexemin)" options: cmd: description: @@ -38,6 +39,8 @@ - If the command contains any special characters ($, &, etc), they must be escaped using double backslashes like \\\\\\$. - For example, to display job by job name the command would be C(cmd:"\\$dj''HELLO''") + - By default, the command will be converted to uppercase before execution; to control this + behavior, see the I(case_sensitive) option below. type: str required: true verbose: description: @@ -58,6 +61,14 @@ type: int required: false default: 1 + case_sensitive: + description: + - If C(true), the command will not be converted to uppercase before + execution. Instead, the casing will be preserved just as it was + written in a task. + type: bool + required: false + default: false notes: - Commands may need to use specific prefixes like $, they can be discovered by issuing the following command C(D OPDATA,PREFIX). @@ -177,7 +188,7 @@ opercmd = ZOAUImportError(traceback.format_exc()) -def execute_command(operator_cmd, timeout_s=1, *args, **kwargs): +def execute_command(operator_cmd, timeout_s=1, preserve=False, *args, **kwargs): """ Executes an operator command. @@ -187,6 +198,8 @@ Command to execute. timeout : int Time until it stops whether it finished or not. + preserve : bool + Whether to tell opercmd to preserve the case in the command. *args : dict Some arguments to pass on.
**kwargs : dict @@ -201,7 +214,7 @@ def execute_command(operator_cmd, timeout_s=1, *args, **kwargs): timeout_c = 100 * timeout_s start = timer() - response = opercmd.execute(operator_cmd, timeout=timeout_c, *args, **kwargs) + response = opercmd.execute(operator_cmd, timeout=timeout_c, preserve=preserve, *args, **kwargs) end = timer() rc = response.rc stdout = response.stdout_response @@ -228,6 +241,7 @@ def run_module(): cmd=dict(type="str", required=True), verbose=dict(type="bool", required=False, default=False), wait_time_s=dict(type="int", required=False, default=1), + case_sensitive=dict(type="bool", required=False, default=False), ) result = dict(changed=False) @@ -314,6 +328,7 @@ def parse_params(params): cmd=dict(arg_type="str", required=True), verbose=dict(arg_type="bool", required=False), wait_time_s=dict(arg_type="int", required=False), + case_sensitive=dict(arg_type="bool", required=False), ) parser = BetterArgParser(arg_defs) new_params = parser.parse_args(params) @@ -344,6 +359,7 @@ def run_operator_command(params): wait_s = params.get("wait_time_s") cmdtxt = params.get("cmd") + preserve = params.get("case_sensitive") use_wait_arg = False if zoau_version_checker.is_zoau_version_higher_than("1.2.4"): @@ -353,7 +369,7 @@ def run_operator_command(params): kwargs.update({"wait": True}) args = [] - rc, stdout, stderr, elapsed = execute_command(cmdtxt, timeout_s=wait_s, *args, **kwargs) + rc, stdout, stderr, elapsed = execute_command(cmdtxt, timeout_s=wait_s, preserve=preserve, *args, **kwargs) if rc > 0: message = "\nOut: {0}\nErr: {1}\nRan: {2}".format(stdout, stderr, cmdtxt) diff --git a/plugins/modules/zos_unarchive.py b/plugins/modules/zos_unarchive.py index 258d9972b..f5febbf90 100644 --- a/plugins/modules/zos_unarchive.py +++ b/plugins/modules/zos_unarchive.py @@ -36,7 +36,7 @@ - I(src) can be a USS file or MVS data set name. - USS file paths should be absolute paths. - MVS data sets supported types are C(SEQ), C(PDS), C(PDSE). - - GDS relative names are supported C(e.g. USER.GDG(-1)). + - GDS relative names are supported ``e.g. USER.GDG(-1)``. type: str required: true format: @@ -146,7 +146,7 @@ description: - A list of directories, files or data set names to extract from the archive. - - GDS relative names are supported C(e.g. USER.GDG(-1)). + - GDS relative names are supported ``e.g. USER.GDG(-1)``. - When C(include) is set, only those files will we be extracted leaving the remaining files in the archive. - Mutually exclusive with exclude. @@ -157,7 +157,7 @@ description: - List the directory and file or data set names that you would like to exclude from the unarchive action. - - GDS relative names are supported C(e.g. USER.GDG(-1)). + - GDS relative names are supported ``e.g. USER.GDG(-1)``. - Mutually exclusive with include. type: list elements: str diff --git a/scripts/ce.py b/scripts/ce.py new file mode 100644 index 000000000..75bb142ca --- /dev/null +++ b/scripts/ce.py @@ -0,0 +1,2089 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) IBM Corporation 2024 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Module CE is used to run Ansible test cases concurrently against a pool of managed +nodes. This module is tailored to z/OS managed nodes and currently has a dependency +on a shell script and the managed venvs provided by the 'ac' tool. +""" + +# pylint: disable=line-too-long, too-many-lines, fixme, too-many-instance-attributes +# pylint: disable=redefined-builtin, too-many-public-methods,too-many-arguments, too-many-locals +# pylint: disable=consider-using-f-string, too-many-branches, too-many-statements + + +import argparse +import json +import sys +import subprocess +import textwrap +import threading +from enum import Enum +from threading import Lock +import time +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import as_completed +from contextlib import contextmanager +from datetime import datetime +from collections import OrderedDict, namedtuple +from typing import List, Tuple +from prettytable import PrettyTable, ALL +from modules.utils import get_test_cases + + +# ------------------------------------------------------------------------------ +# Enums +# ------------------------------------------------------------------------------ +class Status (Enum): + """ + Represents the online/offline status of a managed node. + + Attributes: + ONLINE : Status - The node is online. + OFFLINE : Status - The node is offline. + + Methods: + number() - Returns the integer value of the status. + string() - Returns the string representation of the status. + is_equal(other) - Checks if this status is equal to another status. + is_online() - Checks if this status is online. + default() - Returns the default status (ONLINE). + """ + + ONLINE=(1, "online") + OFFLINE=(0, "offline") + + def __str__(self) -> str: + """ + Convert the name of the status to lowercase when converting it to a string. + + Return: + str: The lowercase name of the status. + """ + return self.name.lower() + + def number(self) -> int: + """ + Returns the numerical element of the tuple. + 1 for ONLINE and 0 for OFFLINE. + + Return: + int: The numerical element of the tuple. + 1 for ONLINE and 0 for OFFLINE. + """ + return self.value[0] + + def string(self) -> str: + """ + Returns the string value contained in the tuple. + 'online' for ONLINE and 'offline' for OFFLINE. + + Return: + str: The string value contained in the tuple. + 'online' for ONLINE and 'offline' for OFFLINE. + """ + return self.value[1] + + def is_equal(self, other) -> bool: + """ + Checks if the numerical values of two statuses are the same. + + Parameters: + other (Status): The other status to compare to. + + Return: + bool: True if the numerical values are the same, False otherwise. + """ + return self.number() == other.number() + + def is_online(self) -> bool: + """ + Checks if the status is ONLINE, i.e. if it equates to 1. + + Return: + bool: True if the status is ONLINE, False otherwise. + """ + return self.number() == 1 + + @classmethod + def default(cls): + """ + Return default status of ONLINE. + + Return: + Status: Return the ONLINE status. + """ + return cls.ONLINE + +class State (Enum): + """ + This class represents the state of a job. It has three + possible values: success, failure, and exceeded-max-failure. + + Attributes: + SUCCESS (State): A job succeeded execution. + FAILURE (State): A job failed to execute. + EXCEEDED (State): A job has exceeded its maximum allowable + failures and will no longer be run in the thread pool.
+ """ + SUCCESS=(1, "success") + FAILURE=(0, "failure") + EXCEEDED=(2, "exceeded-max-failure") + + def __str__(self) -> str: + """ + Returns the name of the state in uppercase letters. + + Return: + str: The name of the state in uppercase letters. + 'SUCCESS' a job succeeded execution. + 'FAILURE' a job failed to execute. + 'EXCEEDED' a job has exceeded its maximum allowable failures. + """ + return self.name.upper() + + + def number(self) -> int: + """ + Returns the numeric value of the state. + + Return: + int: The numeric value of the state. + 1 for 'SUCCESS' a job succeeded execution. + 2 for 'FAILURE' a job failed to execute. + 3 for 'EXCEEDED' a job has exceeded its maximum allowable failures. + """ + return self.value[0] + + def string(self) -> str: + """ + Returns the string representation of the state. + + Return: + str: The string value of the state. + 'success' a job succeeded execution. + 'failure' a job failed to execute. + 'exceeded-max-failure' a job has exceeded its maximum allowable failures. + """ + return self.value[1] + + def is_equal(self, other: Enum) -> bool: + """ + Checks if this state is equal to another state by comparing + the numerical values for the two states. + + Args: + other (State): The other state to compare to. + + Return: + bool: True if the states are equal, False otherwise. + """ + return self.number() == other.number() + + def is_success(self) -> bool: + """ + Checks if this state is successful (SUCCESS) by + ensuring the numerical value is 1. + + Return: + bool: True if the state is successful, False otherwise. + """ + return self.number() == 1 + + def is_failure(self) -> bool: + """ + Checks if this state is a failure (FAILURE) by + ensuring the numerical value is 0. + + Return: + bool: True if the state is a failure, False otherwise. + """ + return self.number() == 0 + + def is_balanced(self) -> bool: + """ + Checks if this state has exceeded (EXCEEDED) by + ensuring the numerical value is 2. + + Return: + bool: True if the state has exceeded, False otherwise. + """ + return self.number() == 2 + +# ------------------------------------------------------------------------------ +# Class Dictionary +# ------------------------------------------------------------------------------ + +class Dictionary(): + """ + This is a wrapper class around a dictionary that provides additional locks + and logic for when interacting with any of the entries being accessed by + a thread pool to ensure safe access. + """ + + def __init__(self): + self._shared_dictionary = {} + self._lock = Lock() + + @contextmanager + def _acquire_with_timeout(self, timeout: int = -1) -> bool: + """ + Acquires a lock with a timeout in milliseconds. + + Parameters: + timeout (int): The maximum time to wait for the lock in milliseconds. + If -1, waits indefinitely. + + Return: + bool: True if the lock was acquired, False otherwise. + """ + result = self._lock.acquire(timeout=timeout) + try: + yield result + finally: + if result: + self._lock.release() + + # Likely works but not tested but also saw no need for this. + # def remove_items(self, remove): + # for key in list(remove.keys()): + # with self._lock: + # if key in self._shared_dictionary: + # self._shared_dictionary.pop(key) + + def pop(self, key, timeout: int = 100) -> object: + """ + Removes the entry from the dictionary and returns it. + Entry will no longer in remain in the dictionary. + + Parameters: + key (str): The key of the item to remove. + timeout (int): The maximum time to wait for acquiring the lock. + Default is 100ms. 
+ + Return: + object: The value of the removed item. + """ + with self._acquire_with_timeout(timeout) as acquired: + if acquired: + if self._shared_dictionary: + if key in self._shared_dictionary: + return self._shared_dictionary.pop(key) + return None + + def get(self, key, timeout: int = 10) -> object: + """ + Retrieves the value associated with the given key from the dictionary. + + Args: + key (str): The key of the entry to retrieve. + timeout (int): The maximum time to wait for the lock, in seconds. + Defaults to 10 seconds. + + Return: + Any: The value associated with the given key. + + Raises: + KeyError: If the key does not exist in the dictionary. + TimeoutError: If the lock cannot be acquired before the timeout expires. + """ + with self._acquire_with_timeout(timeout) as acquired: + if acquired: + return self._shared_dictionary[key] + return None + + def update(self, key, obj) -> None: + """ + Update the dictionary with a new entry, functions same as add(...). + If the entry exists, it will be replaced. + + Parameters: + key (str): The key for the dictionary entry. + obj (object): The object to be stored in the dictionary. + """ + with self._lock: + self._shared_dictionary[key]=obj + + def add(self, key, obj) -> None: + """ + Add an entry to the dictionary, functions same as update(...). + If the entry exists, it will be replaced. + + Parameters: + key (str): The key for the dictionary entry. + obj (object): The object to be stored in the dictionary. + """ + with self._lock: + self._shared_dictionary[key]=obj + + def items(self): + """ + Returns a view of the (key, value) pairs in the dictionary. + + Returns: + The (key, value) pair of each entry in the dictionary. + """ + with self._lock: + return self._shared_dictionary.items() + + def len(self) -> int: + """ + Returns the length of the dictionary. + + Returns: + int: The length of the dictionary. + + Example: + .len() + """ + with self._lock: + return len(self._shared_dictionary) + + def keys(self) -> List[str]: + """ + Returns a list of all keys in the dictionary. + + Returns: + List[str]: A list of all keys in the shared dictionary. + """ + with self._lock: + return self._shared_dictionary.keys() + +# ------------------------------------------------------------------------------ +# Class job +# ------------------------------------------------------------------------------ +class Job: + """ + Job represents a unit of work that the ThreadPoolExecutor will execute. A job + maintains all necessary attributes to allow the test case to execute on a + z/OS managed node. + + Parameters: + hostname (str): Full hostname for the z/OS managed node the Ansible workload will be executed on. + nodes (Dictionary): Dictionary of Node objects that represent the z/OS managed nodes and all their attributes. + testcase (str): The USS absolute path to a testcase using '/path/to/test_suite.py::test_case' + id (int): The id that will be assigned to this job, a unique identifier. The id will be used + as the key in a dictionary. + """ + + def __init__(self, hostname: str, nodes: Dictionary, testcase: str, id: int): + """ + Parameters: + hostname (str): Full hostname for the z/OS managed node the Ansible workload + will be executed on. + nodes (Dictionary): Dictionary of Node objects that represent the z/OS managed + nodes and all their attributes. + testcase (str): The USS absolute path to a testcase using + '/path/to/test_suite.py::test_case' + id (int): The id that will be assigned to this job, a unique identifier. The id will + be used as the key in a dictionary.
+ """ + self._hostnames: list = [] + self._hostnames.append(hostname) + self._testcase: str = testcase + self._capture: str = None + self._failures: int = 0 + self._id: int = id + self._rc: int = -1 + self._successful: bool = False + self._elapsed: str = None + self._hostpattern: str = "all" + self._nodes: Dictionary = nodes + self._stdout_and_stderr: list[Tuple[str, str, str]] = [] + self._stdout: list[Tuple[str, str, str]] = [] + self._verbose: str = None + + def __str__(self) -> str: + """ + This function returns a string representation of the Job. + + Parameters: + self (Job): The Job object to be represented as a string. + + Returns: + A string representation of the Job object. + """ + temp = { + "_hostname": self.get_hostname(), + "_testcase": self._testcase, + "_capture": self._capture, + "_failures": self._failures, + "_id": self._id, + "_rc": self._rc, + "_successful": self._successful, + "_elapsed": self._elapsed, + "_hostpattern": self._hostpattern, + "_pytest-command": self.get_command(), + "verbose": self._verbose + } + + return str(temp) + + def get_command(self) -> str: + """ + Returns a command designed to run with the projects pytest fixture. The command + is created specifically based on the args defined, such as ZOAU or test cases to run. + + Parameters: + self (Job) An instance of the class containing the method. + + Returns: + str: A string representing the pytest command to be executed. + + Example Return: + pytest tests/functional/modules/test_zos_job_submit_func.py::test_job_submit_pds[location1]\ + --host-pattern=allNoneNone --zinventory-raw='{"host": "ec33025a.vmec.svl.ibm.com",\ + "user": "omvsadm", "zoau": "/zoau/v1.3.1",\ + "pyz": "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz",\ + "pythonpath": "/zoau/v1.3.1/lib/3.10", "extra_args": {"volumes": ["222222", "000000"]}}' + + """ + node_temp = self._nodes.get(self.get_hostname()) + node_inventory = node_temp.get_inventory_as_string() + + return f"""pytest {self._testcase} --host-pattern={self._hostpattern} + {self._capture if self._capture else ""} + {self._verbose if self._verbose else ""} --zinventory-raw='{node_inventory}'""" + + + def get_hostnames(self) -> list[str]: + """ + Return all hostnames that have been assigned to this job over time as a list. + Includes hostnames that later replaced with new hostnames because the host is + considered no longer functioning. + + Return: + list[str]: A list of all hosts. + """ + return self._hostnames + + def get_hostname(self) -> str: + """ + Return the current hostname assigned to this node, in other words, the active hostname. + + Return: + str: The current hostname assigned to this job. + """ + return self._hostnames[-1] + + def get_testcase(self) -> str: + """ + Return a pytest parametrized testcase that is assigned to this job. + Incudes absolute path, testcase, and parametrization, eg + + Return: + str: Returns absolute path, testcase, and parametrization, + eg + """ + return self._testcase + + def get_failure_count(self) -> int: + """ + Return the number of failed job executions that have occurred for this job. + Failures can be a result of the z/OS managed node, a bug in the test case or even a + connection issue. This is used for statistical purposes or reason to assign the test + to a new hostname. + + Return: + int: Number representing number of failed executions. + """ + return self._failures + + def get_rc(self) -> int: + """ + The return code for the jobs execution. 
+ + Return: + int: + Return code 0 All tests were collected and passed successfully (pytest) + Return code 1 Tests were collected and run but some of the tests failed (pytest) + Return code 2 Test execution was interrupted by the user (pytest) + Return code 3 Internal error happened while executing tests (pytest) + Return code 4 pytest command line usage error (pytest) + Return code 5 No tests were collected (pytest) + Return code 6 No z/OS nodes available. + Return code 7 Re-balancing of z/OS nodes were performed + Return code 8 Job has exceeded permitted job failures + Return code 9 Job has exceeded timeout + """ + return self._rc + + def get_id(self) -> int: + """ + Returns the job id used as the key in the dictionary to identify the job. + + Return: + int: Id of the job + """ + return self._id + + def get_successful(self) -> bool: + """ + Returns True if the job has completed execution. + + Return: + bool: True if the job completed, otherwise False. + + See Also: + get_rc() - Returns 0 for success, otherwise non-zero. + """ + return self._successful + + def get_elapsed_time(self) -> str: + """ + Returns the elapsed time for this job, in other words, + how long it took this job to run. + + Return: + str: Time formatted as , eg 00:05:30.64 + """ + return self._elapsed + + def get_nodes(self) -> Dictionary: + """ + Returns a dictionary of all the z/OS managed nodes available. + z/OS managed nodes are passed to a job so that a job can + interact with the nodes configuration, for example, + if a job needs to mark the node as offline, it can easily + access the dictionary of z/OS managed nodes to do so. + + Return: + Dictionary[str, node]: Thread safe Dictionary of z/OS managed nodes. + """ + return self._nodes + + def get_stdout_and_stderr_msgs(self) -> list[Tuple[str, str, str]]: + """ + Return all stdout and stderr messages that have been assigned to + this job over time as a list. + + Return: + list[str]: A list of all stderr and stdout messages. + """ + return self._stdout_and_stderr + + def get_stdout_msgs(self) -> list[Tuple[str, str, str]]: + """ + Return all stdout messages that have been assigned to this job + over time as a list. + + Return: + list[str]: A list of all stderr and stdout messages. + """ + return self._stdout + + def get_stdout_and_stderr_msg(self) -> Tuple[str, str, str]: + """ + Return the current stdout and stderr message assigned to this node, in + other words, the last message resulting from this jobs execution. + + Return: + str: The current concatenated stderr and stdout message. + """ + return self._stdout_and_stderr[-1] + + def get_stdout_msg(self) -> Tuple[str, str, str]: + """ + Return the current stdout message assigned to this node, in other + words, the last message resulting from this jobs execution. + + Return: + str: The current concatenated stderr and stdout message. + """ + return self._stdout[-1] + + def set_rc(self, rc: int) -> None: + """ + Set the jobs return code obtained from execution. + + Parameters: + rc (int): Value that is returned from the jobs execution + """ + self._rc = rc + + def set_success(self) -> None: + """ + Mark the job as having completed successfully. + + Parameters: + completed (bool): True if the job has successfully returned + with a RC 0, otherwise False. + """ + self._successful = True + + def add_hostname(self, hostname: str) -> None: + """ + Set the hostname of where the job will be run. + + Parameters: + hostname (str): Hostname of the z/OS managed node. 
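+
+        Example:
+            Illustrative only; after a rebalance appends a new host,
+            get_hostname() returns the most recently assigned host.
+
+            >>> job.add_hostname("ec33025a.vmec.svl.ibm.com")
+            >>> job.get_hostname()
+            'ec33025a.vmec.svl.ibm.com'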
+ """ + self._hostnames.append(hostname) + + def increment_failure(self) -> None: + """ + Increment the failure by 1 for this jobs. Each time the job + returns with a non-zero return code, increment the value + so this statistic can be reused in other logic. + """ + self._failures +=1 + + def set_elapsed_time(self, start_time: time) -> None: + """ + Set the start time to obtain the elapsed time this + job took to run. Should only set this when RC is zero. + + Parameters: + start_time (time): The time the job started. A start time should be + captured before the job is run, and passed to this + function after the job completes for accuracy of + elapsed time. + """ + self._elapsed = elapsed_time(start_time) + + def set_capture(self, capture: bool) -> None: + """ + Indicate if pytest should run with '-s', which will + show output and not to capture any output. Pytest + captures all output sent to stdout and stderr, + so you won't see the printed output in the console + when running tests unless a test fails. + """ + if capture is True: + self._capture = " -s" + + def set_verbose(self, verbosity: int) -> None: + """ + Indicate if pytest should run with verbosity to show + detailed console outputs and debug failing tests. + Verbosity is defined by the number of v's passed + to py test. + + If verbosity is outside of the numerical range, no + verbosity is set. + + Parameters: + int: Integer range 1 - 4 + 1 = -v + 2 = -vv + 3 = -vvv + 4 = -vvvv + """ + if verbosity == 1: + self._verbose = " -v" + elif verbosity == 2: + self._verbose = " -vv" + elif verbosity == 3: + self._verbose = " -vvv" + elif verbosity == 4: + self._verbose = " -vvvv" + + def set_stdout_and_stderr(self, message: str, std_out_err: str, date_time: str) -> None: + """ + Add a stdout and stderr concatenated message resulting from the jobs + execution (generally std out/err resulting from pytest) the job. + + Parameters: + message (str): Message associated with the stdout and stderr output. Message + describes the std_out_err entry. + stdout_stderr (str): Stdout and stderr concatenated into one string. + date_time (str): Date and time when the stdout and stderr output was generated. + """ + + Joblog = namedtuple('Joblog',['id', 'hostname', 'command', 'message', 'std_out_err', 'date_time']) + + joblog = Joblog(self._id, self._hostnames[-1], self.get_command(), message, std_out_err, date_time) + self._stdout_and_stderr.append(joblog) + + def set_stdout(self, message: str, std_out_err: str, date_time: str) -> None: + """ + Add a stdout concatenated message resulting from the jobs + execution (generally std out/err resulting from pytest) the job. + + Parameters: + message (str): Message associated with the stdout/stderr output. + stdout_stderr (str): Stdout and stderr concatenated into one string. + date_time (str): Date and time when the stdout/stderr was generated. + """ + Joblog = namedtuple('Joblog',['id', 'hostname', 'command', 'message', 'std_out_err', 'date_time']) + + joblog = Joblog(self._id, self._hostnames[-1], self.get_command(), message, std_out_err, date_time) + self._stdout.append(joblog) + +# ------------------------------------------------------------------------------ +# Class Node +# ------------------------------------------------------------------------------ + + +class Node: + """ + A z/OS node suitable for Ansible tests to execute. Attributes such as 'host', + 'zoau', 'user' and 'pyz' , etc are maintained in this class instance because + these attributes can vary between nodes. 
These attributes are then used to
+    create a dictionary for use with the pytest fixture 'zinventory-raw'.
+
+    This class also tracks the health of the node: Status.ONLINE means it is
+    discoverable and usable, while Status.OFFLINE means that over time, since
+    being Status.ONLINE, it has been determined unusable and should not be
+    assigned any further work.
+
+    Parameters:
+        hostname (str): Hostname for the z/OS managed node the Ansible workload
+            will be executed on.
+        user (str): The USS user who will run the Ansible workload on z/OS.
+        zoau (str): The USS absolute path to where ZOAU is installed.
+        pyz (str): The USS absolute path to where python is installed.
+        pythonpath (str): The USS absolute path to the ZOAU Python modules,
+            precompiled or wheels.
+        volumes (str): Comma delimited string of volumes to use with the test cases.
+    """
+
+    def __init__(self, hostname: str, user: str, zoau: str, pyz: str, pythonpath: str, volumes: str):
+        """
+        Parameters:
+            hostname (str): Hostname for the z/OS managed node the Ansible
+                workload will be executed on.
+            user (str): The USS user who will run the Ansible workload on z/OS.
+            zoau (str): The USS absolute path to where ZOAU is installed.
+            pyz (str): The USS absolute path to where python is installed.
+            pythonpath (str): The USS absolute path to the ZOAU Python modules,
+                precompiled or wheels.
+            volumes (str): Comma delimited string of volumes to use with the
+                test cases.
+        """
+        self._hostname: str = hostname
+        self._user: str = user
+        self._zoau: str = zoau
+        self._pyz: str = pyz
+        self._pythonpath: str = pythonpath
+        self._volumes: str = volumes
+        self._state: Status = Status.ONLINE
+        self._failures: set[int] = set()
+        self._balanced: set[int] = set()
+        self._inventory: dict[str, str] = {}
+        self._inventory.update({'host': self._hostname})
+        self._inventory.update({'user': self._user})
+        self._inventory.update({'zoau': self._zoau})
+        self._inventory.update({'pyz': self._pyz})
+        self._inventory.update({'pythonpath': self._pythonpath})
+        self._extra_args = {}
+        self._extra_args.update({'extra_args': {'volumes': self._volumes.split(",")}})
+        self._inventory.update(self._extra_args)
+        self._assigned = Dictionary()
+        self._failure_count: int = 0
+        self._assigned_count: int = 0
+        self._balanced_count: int = 0
+        self._running_job_id: int = -1
+
+    def __str__(self) -> str:
+        """
+        String representation of the Node class. Not every class
+        variable is returned, some of the dictionaries which track
+        state are large and should be accessed directly from those
+        class members.
+        """
+        temp = {
+            "_hostname": self._hostname,
+            "_user": self._user,
+            "_zoau": self._zoau,
+            "_pyz": self._pyz,
+            "_pythonpath": self._pythonpath,
+            "_volumes": self._volumes,
+            "_state": str(self._state),
+            "inventory": self.get_inventory_as_string(),
+            "_failure_count": str(self._failure_count),
+            "_assigned_count": str(self._assigned_count),
+            "_balanced_count": str(self._balanced_count),
+            "_running_job_id": str(self._running_job_id)
+        }
+        return str(temp)
+
+    def set_state(self, state: Status) -> None:
+        """
+        Set the status of the node: is the z/OS node ONLINE (usable)
+        or OFFLINE (not usable).
+
+        Parameters:
+            state (Status): Set state to Status.ONLINE or Status.OFFLINE.
+                Use Status.ONLINE to signal the managed node is healthy, use
+                Status.OFFLINE to signal the managed node should not be used
+                to run any jobs.
+        """
+        self._state = state
+
+    def set_failure_job_id(self, id: int) -> None:
+        """
+        Update the node with any jobs which fail to run. If a job fails to run,
+        add the job ID to the node. A job failure occurs when the
+        execution of the job ends with a non-zero return code.
+
+        Parameters:
+            id (int): The ID of the job that failed to run.
+        """
+        self._failures.add(id)
+        self._failure_count = len(self._failures)
+
+    def set_assigned_job(self, job: Job) -> None:
+        """
+        Add a job to the Node that has been assigned to this node (z/OS managed node).
+
+        Parameters:
+            job (Job): The job that has been assigned to this node.
+        """
+        self._assigned.add(job.get_id(), job)
+        self._assigned_count += 1
+
+    def set_balanced_job_id(self, id: int) -> None:
+        """
+        Add a job's ID to the node when the job has been rebalanced.
+
+        Parameters:
+            id (int): The job ID to add to the set of balanced jobs.
+        """
+        self._balanced.add(id)
+
+    def set_running_job_id(self, running_job_id: int) -> None:
+        """
+        Set the ID of the currently running job.
+
+        Parameters:
+            running_job_id (int): The ID of the currently running job.
+        """
+        self._running_job_id = running_job_id
+
+    def get_state(self) -> Status:
+        """
+        Get the z/OS managed node status.
+
+        Return:
+            Status.ONLINE: If the z/OS managed node state is usable.
+            Status.OFFLINE: If the z/OS managed node state is unusable.
+        """
+        return self._state
+
+    def get_hostname(self) -> str:
+        """
+        Get the hostname for this managed node. A node is a
+        z/OS host capable of running an Ansible unit of work.
+
+        Return:
+            str: The managed node's hostname.
+        """
+        return self._hostname
+
+    def get_user(self) -> str:
+        """
+        Get the user ID that is permitted to run an Ansible workload on
+        the managed node.
+
+        Return:
+            str: Unix System Services (USS) user name
+        """
+        return self._user
+
+    def get_zoau(self) -> str:
+        """
+        Get the ZOAU home directory path found on the managed node.
+
+        Return:
+            str: Unix System Services (USS) absolute path of where
+            ZOAU is installed.
+        """
+        return self._zoau
+
+    def get_pyz(self) -> str:
+        """
+        Get the Python home directory path found on the managed node.
+
+        Return:
+            str: Unix System Services (USS) absolute path of where
+            python is installed.
+        """
+        return self._pyz
+
+    def get_inventory_as_string(self) -> str:
+        """
+        Get a JSON string of the inventory that can be used with
+        the 'zinventory-raw' pytest fixture. This JSON string can be
+        passed directly to the option 'zinventory-raw', for example:
+
+        pytest .... --zinventory-raw='{.....}'
+
+        Return:
+            str: A JSON string of the managed node inventory attributes.
+        """
+        return json.dumps(self._inventory)
+
+    def get_inventory_as_dict(self) -> dict[str, str]:
+        """
+        Get a dictionary that can be used with the 'zinventory-raw'
+        pytest fixture. This is the dict(), not a string; you might
+        choose this so you can dynamically update the dictionary and
+        then use json.dumps(...) to convert it to a string and pass it
+        to 'zinventory-raw'.
+
+        Return:
+            dict[str, str]: A dictionary of the managed node
+            inventory attributes.
+        """
+        return self._inventory
+
+    def get_failure_jobs_as_dictionary(self) -> set:
+        """
+        Get the set of job IDs which have failed on this node. Note this is
+        the set of IDs tracked by the node, not a Dictionary of Job objects.
+
+        Return:
+            set[int]: The IDs of all Jobs that have been assigned to and
+            failed on this Node.
+        """
+        return self._failures
+
+    def get_assigned_jobs_as_string(self) -> str:
+        """
+        Get a JSON string of all jobs which have been assigned to this node.
+
+        Return:
+            str: A JSON string representation of the assigned jobs.
+        """
+        # A Dictionary is not JSON serializable, serialize its items instead.
+        return json.dumps({key: str(value) for key, value in self._assigned.items()})
+
+    def get_assigned_jobs_as_dictionary(self) -> Dictionary:
+        """
+        Get a Dictionary of all jobs which have been assigned to this node.
+
+        Return:
+            Dictionary[int, Job]: A Dictionary of all jobs which have
+            been assigned to this node.
+        """
+        return self._assigned
+
+    def get_failure_job_count(self) -> int:
+        """
+        Get the numerical count of how many Job(s) have failed on this
+        Node with a non-zero return code.
+
+        Returns:
+            int: The number of failed Jobs.
+        """
+        return self._failure_count
+
+    def get_assigned_job_count(self) -> int:
+        """
+        Get the numerical count of how many Job(s) have been assigned
+        to this Node.
+
+        Returns:
+            int: The number of Jobs assigned to this Node.
+        """
+        return self._assigned_count
+
+    def get_balanced_job_count(self) -> int:
+        """
+        Get the numerical count of how many Job(s) have been
+        reassigned (balanced) to this Node.
+
+        Returns:
+            int: The number of jobs which have been balanced onto
+            this node.
+        """
+        self._balanced_count = len(self._balanced)
+        return self._balanced_count
+
+    def get_running_job_id(self) -> int:
+        """
+        Get the job id of the currently running job.
+
+        Returns:
+            int: The job id of the currently running job.
+        """
+        return self._running_job_id
+
+# ------------------------------------------------------------------------------
+# Helper methods
+# ------------------------------------------------------------------------------
+
+def get_jobs(nodes: Dictionary, paths: str, skip: str, capture: bool, verbosity: int, replay: bool = False) -> Dictionary:
+    """
+    Get a thread safe dictionary of job(s).
+    A job represents a test case, a unit of work the ThreadPoolExecutor will run.
+    A job manages the state of a test case as well as the necessary information
+    to run on a z/OS managed node.
+
+    Parameters:
+        nodes (Dictionary): Thread safe Dictionary of active z/OS managed nodes.
+        paths (str): Absolute path of directories containing test suites or absolute
+            path of individual test suites comma or space delimited.
+            A directory of test cases is such that it contains test suites.
+            A test suite is a collection of test cases in a file that starts with
+            'test' and ends in '.py'.
+        skip (str): (Optional) Absolute path of either test suites, or test cases.
+            Test cases can be parametrized such they use the '::' syntax or not.
+            Skip does not support directories.
+        capture (bool): Instruct pytest whether to capture any output, the equivalent of 'pytest -s'.
+        verbosity (int): The pytest verbosity level to use, 1 = -v through 4 = -vvvv.
+        replay (bool): When True, 'paths' is instead treated as a comma delimited
+            list of failed test cases to replay.
+
+    Raises:
+        FileNotFoundError : If a test suite, test case or skipped test cannot be found.
+        RuntimeError : When no z/OS managed hosts were online.
+
+    Returns:
+        Dictionary [int, Job]: A thread safe Dictionary containing numeric keys (ID) with value
+            type Job, each Dictionary item is a testcase with supporting
+            attributes necessary to execute on a z/OS managed node.
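+
+    Example:
+        A minimal sketch; the managed node attributes and test path below are
+        illustrative values taken from the examples in main(), and get_nodes(...)
+        is the helper documented later in this module.
+
+        >>> nodes = get_nodes(user="ibmuser", zoau="/zoau/v1.3.1",
+        ...                   pyz="/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz",
+        ...                   pythonpath="/zoau/v1.3.1/lib/3.10", volumes="222222,000000")
+        >>> jobs = get_jobs(nodes, paths="../tests/functional/modules/test_zos_tso_command_func.py",
+        ...                 skip="", capture=False, verbosity=0)
+        >>> jobs.len() >= 1
+        True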
+ """ + + hostnames=list(nodes.keys()) + hostnames_length = nodes.len() + parametrized_test_cases = [] + if hostnames_length == 0: + raise RuntimeError('No z/OS managed hosts were online, please check host availability.') + + # Thread safe dictionary of Jobs + jobs = Dictionary() + index = 0 + hostnames_index = 0 + + if not replay: + parametrized_test_cases = get_test_cases(paths, skip) + else: + parametrized_test_cases = paths.split(',') + + for parametrized_test_case in parametrized_test_cases: + + # Assign each job a hostname using round robin (modulus % division) + if hostnames_index % hostnames_length == 0: + hostnames_index = 0 + + # Create a job, add it jobs Dictionary, update node reference + hostname = hostnames[hostnames_index] + _job = Job(hostname = hostname, nodes = nodes, testcase=parametrized_test_case, id=index) + _job.set_verbose(verbosity) + _job.set_capture(capture) + jobs.update(index, _job) + nodes.get(hostname).set_assigned_job(_job) + index += 1 + hostnames_index += 1 + + # for key, value in jobs.items(): + # print(f"The job count = {str(jobs.len())}, job id = {str(key)} , job = {str(value)}") + + return jobs + + +def update_job_hostname(job: Job): + """ + Updates the job with a new hostname. Jobs rely on healthy hostnames and when + its determine that the z/OS hostname that is being accessed has become + incapable of addressing any unit of work, this method will append a new + z/os hostname for the job to execute its job on. This method ensures + that it is a randomly different node then the one previously assigned + to the job. + + This is referred to as re-balancing a jobs hostname, this happens when + a job has consistently failed N number of times. + + TODO: + - Iterate over all jobs looking for inactive ones and balance all of the + job nodes. - after giving this some thou + """ + + unsorted_items = {} + nodes = job.get_nodes() + + # We need the Jobs assigned host names (job.get_hostnames() -> list[str]) + set_of_nodes_assigned_to_job: set = set(job.get_hostnames()) + + set_of_nodes_online: set = set() + for key, value in job.get_nodes().items(): + if value.get_state().is_online(): + set_of_nodes_online.add(key) + + # The difference of all available z/OS zos_nodes and ones assigned to a job. + nodes_available_and_online = list(set_of_nodes_online - set_of_nodes_assigned_to_job) + + for hostname in nodes_available_and_online: + count = nodes.get(hostname).get_assigned_job_count() + unsorted_items[hostname] = count + + sorted_items_by_assigned = OrderedDict(sorted(unsorted_items.items(), key=lambda x: x[1])) + # for key, value in sorted_items_by_assigned.items(): + # print(f" Sorted by assigned are; key = {key}, value = {value}.") + + # After being sorted ascending, assign the first index which will have been the lease used connection. + if len(sorted_items_by_assigned) > 0: + hostname = list(sorted_items_by_assigned)[0] + job.add_hostname(hostname) + nodes.get(hostname).set_assigned_job(job) + + +def get_nodes(user: str, zoau: str, pyz: str, hostnames: list[str] = None, pythonpath: str = None, volumes: str = None) -> Dictionary: + """ + Get a thread safe Dictionary of active z/OS managed nodes. + + Parameters: + user (str): The USS user name who will run the Ansible workload on z/OS. + zoau (str): The USS absolute path to where ZOAU is installed. + pyz (str): The USS absolute path to where python is installed. + + Returns: + Dictionary [str, Node]: Thread safe Dictionary containing all the active z/OS managed nodes. 
+ The dictionary key will be the z/OS managed node's hostname and the value + will be of type Node. + """ + nodes = Dictionary() + + if hostnames is None: + hostnames = [] + + # Calling venv.sh directly to avoid the ac dependency, ac usually lives in project root so an + # additional arg would have to be passed like so: "cd ..;./ac --host-nodes --all false" + result = subprocess.run(["echo `./venv.sh --targets-production`"], shell=True, capture_output=True, text=True, check=False) + hostnames = result.stdout.split() + else: + hostnames = hostnames[0].split(',') + + # Prune any production system that fails to ping + for hostname in hostnames: + command = ['ping', '-c', '1', hostname] + result = subprocess.run(args=command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=False) + + # TODO: Use the connection class to connection and validate ZOAU and Python before adding the nodes + if result.returncode == 0: + node=Node(hostname = hostname, user = user, zoau = zoau, pyz = pyz, pythonpath = pythonpath, volumes = volumes) + node.set_state(Status.ONLINE) + nodes.update(key = hostname, obj = node) + + return nodes + +def get_nodes_online_count(nodes: Dictionary) -> int: + """ + Get a count of how many managed Node(s) have status that is equal to Status.ONLINE. + A value greater than or equal to 1 signifies that Job(s) can continue to execute, + otherwise there are no managed nodes capable or running a job. + + A Node is set to Status.OFFLINE when the value used for --bal (balance) is + surpassed. Balance (--bal) is used to signal that Job has run N number of times + on a particular host and had a non-zero return code and should be used by any other Job. + + Parameters: + nodes (Dictionary [ str, node]): Thread safe dictionary z/OS managed nodes. + + Returns: + int: The numerical count of nodes that are online. + """ + nodes_online_count = 0 + for _, value in nodes.items(): + if value.get_state().is_online(): + nodes_online_count += 1 + + return nodes_online_count + +def get_nodes_offline_count(nodes: Dictionary) -> int: + """ + Get a count of how many managed Node(s) have status that is equal to Status.OFFLINE. + A value greater than or equal to 1 signifies that Job(s) have failed to run on this + node and that this node should not be used any further. + + A Node is set to Status.OFFLINE when the value used for --bal (balance) is + surpassed. Balance (--bal) is used to signal that Job has run N number of times + on a particular host and had a non-zero return code and should be used by any other Job. + + Parameters: + nodes (dictionary [ str, node]) Thread safe dictionary z/OS managed nodes. + + Returns: + int - The numerical count of nodes that are offline. + """ + nodes_offline_count = 0 + for _, value in nodes.items(): + if not value.get_state().is_online(): + nodes_offline_count += 1 + + return nodes_offline_count + +# def set_nodes_offline(nodes: Dictionary, maxnode: int) -> None: +# for key, value in nodes.items(): +# if value.get_balanced_count() > maxnode: +# value.set_state(Status.OFFLINE) + +def set_node_offline(node: Node, maxnode: int) -> None: + """ + Sets a node offline if it has exceeded maxnode, the number of permitted + balanced jobs for a node. 'maxnode' is defined as the maximum number of + times a node can fail to run a job before its set to 'offline' indicating + the node is no longer suitable for job execution. + + Parameters: + node (Node): The node to check for balanced jobs. 
+ maxnode (int): The maximum number of balanced jobs + allowed on a node before it is set offline. + """ + if node.get_balanced_job_count() > maxnode: + node.set_state(Status.OFFLINE) + +def get_jobs_statistics(jobs: Dictionary, maxjob: int) -> Tuple[int, list[str], int, list[str], int, list[str], list[str], int, int, list[str], list[str]]: + """ + Collect result data that can be used to generate a log/history of the + programs execution, such as how many jobs ran, how many failed, etc. + + Parameters: + jobs (Dictionary [int, job]) - A dictionary of jobs keyed by their id. + maxjob (int): The maximum number of times a job can fail before its disabled + in the job queue. + + Returns + jobs_total_count (int): The number of jobs that have been scheduled to run. + jobs_success_tests (ist[str]): A list of test cases that were successful. + jobs_success_log (list[str]): A list of log messages associated with the + successful test cases. + jobs_failed_count (int): The total number of jobs that failed. + jobs_failed_tests (list[str]): A list of test cases that failed. + jobs_failed_log: (list[str]): A list of log messages associated with the + failed test cases. + jobs_rebalanced_count (int): The total number of jobs that had their + hostnames rebalanced. + jobs_failed_count_maxjob (int): The total number of jobs that failed + multiple times (exceeded maxjob). + jobs_failed_maxjob_tests (list[str]): A list of test cases that failed + multiple times (exceeded maxjob). + jobs_failed_maxjob_log (list[str]): A list of log messages associated with + the failed test cases that exceeded maxjob. + + Example: + >>>> stats = get_jobs_statistics(jobs, args.maxjob) + >>>> print(f" {stats.jobs_success_count}, {stats.jobs_total_count}, etc) + + Raises: + TypeError: + - If the input argument jobs is not a dictionary + - If any of the values in the jobs dictionary are not instances of the Job class + """ + jobs_success_count = 0 + jobs_success_tests = [] + jobs_failed_count = 0 + jobs_failed_tests = [] + jobs_total_count = 0 + jobs_success_log = [] + jobs_failed_log = [] + jobs_rebalanced_count = 0 + jobs_failed_count_maxjob = 0 + jobs_failed_maxjob_tests =[] + jobs_failed_maxjob_log = [] + + for _, value in jobs.items(): + # Total count of jobs (same as len(jobs)) + jobs_total_count +=1 + + # Total of jobs that have been rebalanced + if len(value.get_hostnames()) > 1: + jobs_rebalanced_count +=1 + + # Total of jobs have a successful status + if value.get_successful(): + jobs_success_count += 1 + jobs_success_tests.append(value.get_testcase()) + jobs_success_log.extend(value.get_stdout_msgs()) + else: + # Total of jobs that have a failure status + if not value.get_successful(): + jobs_failed_count += 1 + jobs_failed_tests.append(value.get_testcase()) + jobs_failed_log.extend(value.get_stdout_and_stderr_msgs()) + # Total of jobs that have failure status and exceeded maxjob, this + # differs from the total of that have a failure status in that maxjob + # has exceeded, while a job can fail and never exceed maxjob because + # there are no healthy z/OS managed nodes to execute on. 
+ if value.get_failure_count() >= maxjob: + jobs_failed_count_maxjob += 1 + jobs_failed_maxjob_tests.append(value.get_testcase()) + jobs_failed_maxjob_log.extend(value.get_stdout_and_stderr_msgs()) + + Statistics = namedtuple('Statistics', + ['jobs_total_count', + 'jobs_success_count', + 'jobs_success_tests', + 'jobs_success_log', + 'jobs_failed_count', + 'jobs_failed_tests', + 'jobs_failed_log', + 'jobs_rebalanced_count', + 'jobs_failed_count_maxjob', + 'jobs_failed_maxjob_tests', + 'jobs_failed_maxjob_log']) + result = Statistics(jobs_total_count, + jobs_success_count, + jobs_success_tests, + jobs_success_log, + jobs_failed_count, + jobs_failed_tests, + jobs_failed_log, + jobs_rebalanced_count, + jobs_failed_count_maxjob, + jobs_failed_maxjob_tests, + jobs_failed_maxjob_log) + + return result + +def get_failed_count_gt_maxjob(jobs: Dictionary, maxjob: int) -> Tuple[int, list[str], dict[int, str], int]: + """ + This function takes in a dictionary of jobs and a maximum job failure count threshold, and returns a tuple containing: + 1. The number of jobs that have failed more than the maximum job failure count threshold. + 2. A list of test cases for those jobs that have failed more than the maximum job failure count threshold. + 3. A dictionary mapping each failed job's ID to its stdout and stderr messages. + 4. The number of jobs that were rebalanced after the maximum job failure count threshold was exceeded. + + Parameters: + jobs (Dictionary): A dictionary mapping job IDs to Job objects. + maxjob (int): The maximum number of times a job can fail before it is considered a failure. + + Returns: + Tuple[int, list[str], dict[int, str], int]: A tuple containing the number of jobs that have + failed more than the maximum job failure count threshold, a list of test cases for those + jobs that have failed more than the maximum job failure count threshold, a dictionary + mapping each failed job's ID to its stdout and stderr messages, and the number of jobs + that were rebalanced after the maximum job failure count threshold was exceeded. + """ + jobs_failed_count = 0 + jobs_failed_list = [] + jobs_failed_log = [] + jobs_rebalanced = 0 + for key, value in jobs.items(): + if value.get_failure_count() >= maxjob: + jobs_failed_count += 1 + jobs_failed_list.append(value.get_testcase()) + jobs_failed_log.append({key : value.get_stdout_and_stderr_msgs()}) + if len(value.get_hostnames()) > 1: + jobs_rebalanced +=1 + #TODO: refactor these tuples to include gt or max to not confused with get jobs statistics + return (jobs_failed_count, jobs_failed_list, jobs_failed_log, jobs_rebalanced) + +def run(id: int, jobs: Dictionary, nodes: Dictionary, timeout: int, maxjob: int, bal: int, extra: str, maxnode: int, throttle: bool) -> Tuple[int, str]: + """ + Runs a job (test case) on a managed node and ensures the job has the necessary + managed node available. If not, it will manage the node and collect the statistics + so that it can be properly run when a resource becomes available. + + Parameters + id (int): Numerical ID assigned to a job. + jobs (Dictionary): A dictionary of jobs, the ID is paired to a job. + A job is a test cased designed to be run by pytest. + nodes (Dictionary): Managed nodes that jobs will run on. These are z/OS + managed nodes. + timeout (int):The maximum time in seconds a job should run on z/OS for, + default is 300 seconds. 
+ maxjob (int): The maximum number of times a job can fail before its + disabled in the job queue + bal (int): The count at which a job is balanced from one z/OS node + to another for execution. + extra (str): Extra commands passed to subprocess before pytest execution + maxnode (int): The maximum number of times a node can fail to run a + job before its set to 'offline' in the node queue. + + Returns: + A tuple of (rc: int, message: str) is returned. + rc (int): + Return code 0 All tests were collected and passed successfully (pytest). + Return code 1 Tests were collected and run but some of the tests failed (pytest). + Return code 2 Test execution was interrupted by the user (pytest). + Return code 3 Internal error happened while executing tests (pytest). + Return code 4 pytest command line usage error (pytest). + Return code 5 No tests were collected (pytest). + Return code 6 No z/OS nodes available. + Return code 7 Re-balancing of z/OS nodes were performed. + Return code 8 Job has exceeded permitted job failures. + Return code 9 Job has exceeded timeout. + Return code 10 Job is being passed over because the node that + is going to run the job is executing another job. + + message (str): Description and details of the jobs execution, contains + return code, hostname, job id, etc. Informational and useful when + understanding the job's lifecycle. + """ + + job = jobs.get(id) + hostname = job.get_hostname() + #id = str(job.get_id()) + elapsed = 0 + message = None + rc = None + result = None + + node_count_online = get_nodes_online_count(nodes) + if node_count_online > 0: + node = nodes.get(hostname) + # Temporary solution to avoid nodes running concurrent work loads + # if get_nodes_offline_count(nodes) == 0 and node.get_running_job_id() == -1: + # TODO: Why check if there are no offline nodes , feels like node.get_running_job_id() would have been enough. + + if throttle and (node.get_running_job_id() != -1): + rc = 10 + job.set_rc(rc) + nodes_count = nodes.len() + node_count_offline = get_nodes_offline_count(nodes) + #other = node.get_assigned_jobs_as_dictionary().get(id) + date_time = datetime.now().strftime("%H:%M:%S") #("%d/%m/%Y %H:%M:%S") + rsn = f"Managed node is not able to execute job id={node.get_running_job_id()}, nodes={nodes_count}, offline={node_count_offline}, online={node_count_online}." + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={0}, rc={rc}, msg={rsn}" + node.set_running_job_id(-1) # Set it to false after message string + node.set_balanced_job_id(id) + #set_node_offline(node, maxnode) + update_job_hostname(job) + else: + node.set_running_job_id(id) + start_time = time.time() + date_time = datetime.now().strftime("%H:%M:%S") #"%d/%m/%Y %H:%M:%S") + thread_name = threading.current_thread().name + try: + # Build command and strategically map stdout and stderr so that both are mapped to stderr and the pytest rc goes to stdout. + cmd = f"{extra};{job.get_command()} 1>&2; echo $? >&1" + result = subprocess.run([cmd], shell=True, capture_output=True, text=True, timeout=timeout, check=False) + node.set_running_job_id(-1) + job.set_elapsed_time(start_time) + elapsed = job.get_elapsed_time() + rc = int(result.stdout) + + if rc == 0: + job.set_rc(rc) + job.set_success() + rsn = "Job successfully executed." 
+ message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + pytest_std_out_err = result.stderr + job.set_stdout(message, pytest_std_out_err, date_time) + else: + job_failures = job.get_failure_count() + + if job_failures >= maxjob: + rc = 8 + job.set_rc(rc) + rsn = f"Test exceeded allowable failures={maxjob}." + message = f"Job id={id}, host={hostname}, start time={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + elif job_failures == bal: + rc = 7 + job.set_rc(rc) + node.set_balanced_job_id(id) + set_node_offline(node, maxnode) + update_job_hostname(job) + rsn = f"Job is reassigned to managed node={job.get_hostname()}, job exceeded allowable balance={bal}." + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + elif rc == 1: + job.set_rc(rc) + rsn = "Test case failed with an error." + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + elif rc == 2: + job.set_rc(int(rc)) + rsn = "Test case execution was interrupted by the user." + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + elif rc == 3: + job.set_rc(int(rc)) + rsn = "Internal error occurred while executing test." + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + elif rc == 4: + job.set_rc(int(rc)) + rsn = "Pytest command line usage error." + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + elif rc == 5: + job.set_rc(int(rc)) + rsn = "No tests were collected." + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + + # Only increment a job failure after evaluating all the RCs + job.increment_failure() + + # Update the node with which jobs failed. A node has all assigned jobs so this ID can be used later for eval. + node.set_failure_job_id(id) + job.set_stdout_and_stderr(message, result.stderr, date_time) + + except subprocess.TimeoutExpired: + node.set_running_job_id(-1) + rc = 9 + job.set_rc(rc) + job.set_elapsed_time(start_time) + elapsed = job.get_elapsed_time() + rsn = f"Job has exceeded subprocess timeout={str(timeout)}" + message = f"Job id={id}, host={hostname}, start={date_time}, elapsed={elapsed}, rc={rc}, thread={thread_name}, msg={rsn}" + job.set_stdout_and_stderr(message, rsn, date_time) + job.increment_failure() + node.set_failure_job_id(id) + else: + # TODO: Is it possible there are no nodes, had an error once but not been able to recreate here. + node.set_running_job_id(-1) + rc = 6 + nodes_count = nodes.len() + node_count_offline = get_nodes_offline_count(nodes) + rsn = f"There are no managed nodes online to run jobs, nodes={nodes_count}, offline={node_count_offline}, online={node_count_online}." + message = f"Job id={id}, host={hostname}, elapsed={job.get_elapsed_time()}, rc={rc}, msg={rsn}" + job.set_stdout_and_stderr(message, rsn, date_time) + job.increment_failure() + node.set_failure_job_id(id) + + return rc, message + + +def runner(jobs: Dictionary, nodes: Dictionary, timeout: int, max: int, bal: int, extra: str, maxnode: int, workers: int, throttle: bool, returncode: bool) -> list[str]: + """ + Method creates an executor to run a job found in the jobs dictionary concurrently. 
+ This method is the key function that allows for concurrent execution of jobs. + + Parameters: + jobs: Dictionary + A dictionary of jobs, the ID is paired to a job. + A job is a test cased designed to be run by pytest. + nodes: Dictionary + Managed nodes that jobs will run on. These are z/OS + managed nodes. + timeout: int + The maximum time in seconds a job should run on z/OS for, + default is 300 seconds. + maxjob: int + The maximum number of times a job can fail before its + disabled in the job queue + bal: int + The count at which a job is balanced from one z/OS node + to another for execution. + extra: str + Extra commands passed to subprocess before pytest execution + maxnode: int + The maximum number of times a node can fail to run a + job before its set to 'offline' in the node queue. + workers: int + The numerical value used to increase the number of worker + threads by proportionally. By default this is 3 that will + yield one thread per node. With one thread per node, test + cases run one at a time on a managed node. This value + is used as a multiple to grow the number of threads and + test concurrency. For example, if there are 5 nodes and + the workers = 3, then 15 threads will be created + resulting in 3 test cases running concurrently. + + Returns: + list[str]: A list of strings, each list entry is describes the jobs lifecycle. + """ + + if workers > 1: + number_of_threads = nodes.len() * workers + else: + number_of_threads = nodes.len() + + result = [] + with ThreadPoolExecutor(number_of_threads,thread_name_prefix='ansible-test') as executor: + futures = [executor.submit(run, key, jobs, nodes, timeout, max, bal, extra, maxnode, throttle) for key, value in jobs.items() if not value.get_successful()] + for future in as_completed(futures): + rc, message = future.result() + if future.exception() is not None: + msg = f"[ERROR] Executor exception occurred with error: {future.exception()}" + result.append(msg) + if not returncode: + print(msg) + elif future.cancelled(): + msg = f"[ERROR] Executor cancelled job, message = {message}" + result.append(msg) + if not returncode: + print(msg) + elif future.done(): + msg = f"[{"INFO" if rc == 0 else "WARN"}] Executor message = {message}" + result.append(msg) + if not returncode: + print(msg) + elif future.running(): + msg = f"[{"INFO" if rc == 0 else "WARN"}] Thread pool is still running = {message}" + result.append(msg) + if not returncode: + print(msg) + + # try: + # for future in as_completed(futures, timeout=200): + # rc, message = future.result() + # print("JOB RC is " + str(rc) + " with message " + message) + # except concurrent.futures.TimeoutError: + # print("this took too long...") + return result + +def elapsed_time(start_time: time): + """ + Given a start time, this will return a formatted string of time matching + pattern HH:MM:SS.SS , eg 00:02:38.36 + + Parameters: + start_time (time): The time the test case has began. This is generally + captured before a test is run. + + Returns: + str: The elapsed time, how long it took a job to run. A string + is returned representing the elapsed time, , eg 00:02:38.36 + """ + + hours, rem = divmod(time.time() - start_time, 3600) + minutes, seconds = divmod(rem, 60) + elapsed = "{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds) + return elapsed + +def print_job_logs(log: list[Tuple[str, str, str]], state: State) -> None: + """ + Prints job logs to the console. If State is of type SUCCESS, prints to stdout, + else prints to stderr. 
+
+    Parameters:
+        log (list[Tuple[str, str, str]]): A list of tuples containing job log information.
+        state (State): The current state of the program.
+    """
+    if len(log) > 0:
+        for entry in log:
+            msg = f"------------------------------------------------------------\n"\
+                f"[START] [{state.string()}] log entry.\n"\
+                "------------------------------------------------------------\n"\
+                f"\tJob ID: {entry.id}\n"\
+                f"\tHostname: {entry.hostname}\n"\
+                f"\tDate time: {entry.date_time}\n"\
+                f"\tCommand: {entry.command}\n"\
+                f"\tMessage: {entry.message}\n"\
+                f"\tStdout: \n\t{entry.std_out_err.replace('\n', '\n\t')}\n"\
+                "------------------------------------------------------------\n"\
+                f"[END] [{state.string()}] log entry.\n"\
+                "------------------------------------------------------------"
+            if state.is_success():
+                print(msg)
+            else:
+                print(msg, file=sys.stderr)
+                # sys.stderr.write(msg)
+                # sys.stderr.flush()
+
+def print_job_tests(tests: list[str], state: State) -> None:
+    """
+    Prints the test cases for a job.
+
+    Parameters:
+        tests (list[str]): A list of strings representing the test cases for a job.
+        state (State): The current state of the job.
+    """
+
+    if len(tests) > 0:
+        msg_header = f"------------------------------------------------------------\n"\
+            f"[START] [{state.string()}] test cases.\n"\
+            "------------------------------------------------------------"
+        if state.is_success():
+            print(msg_header)
+        else:
+            print(msg_header, file=sys.stderr)
+
+        for entry in tests:
+            if state.is_success():
+                print(f"\t{entry}")
+            else:
+                print(f"\t{entry}", file=sys.stderr)
+
+        msg_tail = f"------------------------------------------------------------\n"\
+            f"[END] [{state.string()}] test cases.\n"\
+            "------------------------------------------------------------"
+        if state.is_success():
+            print(msg_tail)
+        else:
+            print(msg_tail, file=sys.stderr)
+
+def write_job_logs_to_html(log: list[Tuple[str, str, str]], state: State, replay: str) -> None:
+    """
+    Writes job logs to an HTML file in /tmp using the PrettyTable library.
+
+    Parameters:
+        log (list[Tuple[str, str, str]]): A list of tuples containing job information.
+        state (State): The current state of the program.
+        replay (str): The play (replay) number, used in the generated file name.
+    """
+    if len(log) > 0:
+        table = PrettyTable()
+        table.hrules = ALL
+        table.format = False
+        table.header = True
+        table.left_padding_width = 1
+        table.right_padding_width = 1
+        table.field_names = ["Count", "Job ID", "z/OS Managed Node", "Pytest Command", "Message", "Standard Out & Error", "Date and Time"]
+        table.align["Message"] = "l"
+        table.align["Standard Out & Error"] = "l"
+        table.sortby = "Job ID"
+
+        count = 0
+        for entry in log:
+            table.add_row([count, entry.id, entry.hostname, entry.command, entry.message, entry.std_out_err, entry.date_time])
+            count += 1
+
+        html = table.get_html_string(attributes={'border': '1', "style": "white-space:nowrap;width:100%;border-collapse: collapse"})
+        date_time = datetime.now().strftime("%H:%M:%S")
+        with open(f"/tmp/concurrent-executor-log-replay-{replay}-{state.string()}-{date_time}.html", "w", encoding="utf-8") as file:
+            file.write(html)
+
+def write_results_to_file(results: list[str]) -> None:
+    """
+    Write the cumulative results of all plays to a file in /tmp.
+
+    Parameters:
+        results (list[str]): A list of strings representing the results of each action taken during the plays.
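+
+    Example:
+        Illustrative only; each entry is written on its own line to
+        /tmp/concurrent-executor-log-<HH:MM:SS>.txt.
+
+        >>> write_results_to_file(["[INFO] Node 1 = ec01130a.vmec.svl.ibm.com"])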
+ """ + date_time = datetime.now().strftime("%H:%M:%S") + with open(f"/tmp/concurrent-executor-log-{date_time}.txt", "w", encoding="utf-8") as file: + for result in results: + file.write(f"{result}\n") + file.close() + +def write_job_tests_to_html(tests: list[str], state: State, replay: str) -> None: + """ + Prints job tests to HTML. + + Parameters: + tests (list[str]): A list of test cases. + state (State): The current state of the job. + replay (str): The replay ID of the job. + """ + if len(tests) > 0: + table = PrettyTable() + table.hrules=ALL + table.format = False + table.header = True + table.left_padding_width = 1 + table.right_padding_width = 1 + table.field_names = ["Count", "Test Case"] + table.align["Test Case"] = "l" + table.sortby = "Count" + + count = 0 + for entry in tests: + table.add_row([count, entry]) + count +=1 + + html = table.get_html_string(attributes={'border': '1', "style":"white-space:nowrap;width:100%;border-collapse: collapse"}) + date_time = datetime.now().strftime("%H:%M:%S") + with open(f"/tmp/concurrent-executor-tests-replay-{replay}-{state.string()}-{date_time}.html", "w", encoding="utf-8") as file: + file.write(html) + file.close() + +def print_nodes(nodes: Dictionary) -> list[str]: + """ + Prints the names of all z/OS nodes in the provided dictionary. + + Parameters: + nodes (Dictionary): A dictionary containing z/OS node names as keys and values. + + Returns: + list[str] - A list of strings representing the names of all z/OS nodes in the provided dictionary. + + """ + result = [] + count = 1 + if nodes.len() > 0: + msg = f"[INFO] There are {nodes.len()} managed nodes serving this play." + result.append(msg) + print(msg) + + for key, _ in nodes.items(): + msg = f"[INFO] Node {count} = {key}" + result.append(msg) + print(msg) + count +=1 + return result + +def execute(args) -> int: + """ + This function is responsible for executing the tests on the nodes. It takes in several arguments such as the user, + the tests to be run, the maximum number of times a job can fail, and more. The function returns no value. + + Args: + args (Namespace): A Namespace object containing various arguments passed to the script. + + Returns: + int: The exit code of the executor. + - Non-zero means there was an error during execution and at least one test case has failed. + - Zero return code means all tests cases successfully passed. + + Notes: + The concurrent executor will always produce a textual log in /tmp with this named file pattern + 'concurrent-executor-log-{date_time}.txt'. While there are logs it will produce, those are + selected with the command line options. On non-zero return code, its advised the textual log + be evaluated. + """ + play_result = [] + count_play = 1 + count = 1 + replay = False + return_code = 0 + + while count_play <= args.replay: + message = f"\n=================================================\n[START] PLAY {count_play} {f"of {args.replay} " if args.replay > 1 else ""}started.\n=================================================" + if not args.returncode: + print(message) + play_result.append(message) + + start_time_full_run = time.time() + + # Get a dictionary of all active zos_nodes to run tests on + nodes = get_nodes(user = args.user, zoau = args.zoau, pyz = args.pyz, hostnames = args.hostnames, pythonpath = args.pythonpath, volumes = args.volumes) + if not args.returncode: + play_result.extend(print_nodes(nodes)) + + # Get a dictionary of jobs containing the work to be run on a node. 
+        jobs = get_jobs(nodes, paths=args.paths, skip=args.skip, capture=args.capture, verbosity=args.verbosity, replay=replay)
+        iterations_result = ""
+        number_of_threads = nodes.len() * args.workers
+
+        stats = get_jobs_statistics(jobs, args.maxjob)
+        job_count_progress = 0
+        while stats.jobs_success_count != stats.jobs_total_count and count <= int(args.itr):
+            message = f"\n-----------------------------------------------------------\n[START] Thread pool iteration = {count} {f"of {args.itr} " if args.itr > 1 else ""}, pending = {stats.jobs_total_count - stats.jobs_success_count}.\n-----------------------------------------------------------"
+            play_result.append(message)
+            if not args.returncode:
+                print(message)
+
+            start_time = time.time()
+            play_result.extend(runner(jobs, nodes, args.timeout, args.maxjob, args.bal, args.extra, args.maxnode, args.workers, args.throttle, args.returncode))
+
+            stats = get_jobs_statistics(jobs, args.maxjob)
+            iterations_result += f"- Thread pool iteration {count} completed {stats.jobs_success_count - job_count_progress} job(s) in {elapsed_time(start_time)} time, pending {stats.jobs_failed_count} job(s).\n"
+
+            info = f"-----------------------------------------------------------\n[END] Thread pool iteration = {count} {f"of {args.itr} " if args.itr > 1 else ""}, pending = {stats.jobs_failed_count}.\n-----------------------------------------------------------"
+            play_result.append(info)
+            if not args.returncode:
+                print(info)
+
+            count += 1
+            job_count_progress = stats.jobs_success_count
+
+        msg = f"\n-----------------------------------------------------------\n[RESULTS] for play {count_play} {f"of {args.replay} " if args.replay > 1 else ""}.\n-----------------------------------------------------------"
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        msg = f"All {count - 1} thread pool iterations completed in {elapsed_time(start_time_full_run)} time, with {number_of_threads} threads running concurrently."
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        if not args.returncode:
+            print(iterations_result)
+        play_result.append(iterations_result)
+
+        msg = f"Number of jobs queued to be run = {stats.jobs_total_count}."
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        msg = f"Number of jobs that ran successfully = {stats.jobs_success_count}."
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        msg = f"Total number of jobs that failed = {stats.jobs_failed_count}."
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        msg = f"Number of jobs that failed greater than or equal to {str(args.maxjob)} times = {stats.jobs_failed_count_maxjob}."
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        msg = f"Number of jobs that failed less than {str(args.maxjob)} times = {stats.jobs_failed_count - stats.jobs_failed_count_maxjob}."
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        msg = f"Number of jobs that were balanced = {stats.jobs_rebalanced_count}."
+        play_result.append(msg)
+        if not args.returncode:
+            print(msg)
+
+        message = f"\n=================================================\n[END] PLAY {count_play} {f"of {args.replay} " if args.replay > 1 else ""}ended.\n================================================="
+        play_result.append(message)
+        if not args.returncode:
+            print(message)
+
+        # ----------------------------------------------
+        # Print each play to STDOUT and/or write results.
+ # ---------------------------------------------- + if args.verbose: + # Print to stdout any failed test cases and their relevant pytest logs + print_job_tests(stats.jobs_failed_tests, State.FAILURE) + print_job_logs(stats.jobs_failed_log, State.FAILURE) + # Print to stdout any test cases that exceeded the value max number of times a job can fail. + print_job_tests(stats.jobs_failed_maxjob_tests, State.EXCEEDED) + print_job_logs(stats.jobs_failed_maxjob_log, State.EXCEEDED) + # Print to stdout all successful test cases and their relevant logs. + print_job_tests(stats.jobs_success_tests, State.SUCCESS) + print_job_logs(stats.jobs_success_log, State.SUCCESS) + + # Print to HTML any failed test cases and their relevant pytest logs + write_job_tests_to_html(stats.jobs_failed_tests, State.FAILURE, count_play) + write_job_logs_to_html(stats.jobs_failed_log, State.FAILURE, count_play) + + # Print to HTML any test cases that exceeded the value max number of times a job can fail. + write_job_tests_to_html(stats.jobs_failed_maxjob_tests, State.EXCEEDED, count_play) + write_job_logs_to_html(stats.jobs_failed_maxjob_log, State.EXCEEDED, count_play) + + # Print to HTML all successful test cases and their relevant logs. + write_job_tests_to_html(stats.jobs_success_tests, State.SUCCESS, count_play) + write_job_logs_to_html(stats.jobs_success_log, State.SUCCESS, count_play) + + # If replay, repeat concurrent executor with failed tests only, else advance count_play and end the program + if stats.jobs_failed_count > 0: + args.paths = ','.join(stats.jobs_failed_tests) + count_play +=1 + count = 1 + replay = True + # return_code = 1 + return_code = stats.jobs_failed_count + else: + count_play = args.replay + 1 + + # Print the cumulative result of all plays to a file + write_results_to_file(play_result) + + return return_code + +def main(): + """ Main """ + parser = argparse.ArgumentParser( + prog='ce.py', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=textwrap.dedent(''' + Examples + -------- + 1) Execute a single test suite for up to 5 iterations for ibmuser with shared zoau and python installations. + Note, usage of --tests "../tests/functional/modules/test_zos_tso_command_func.py" + $ python3 ce.py\\ + --pyz "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz"\\ + --zoau "/zoau/v1.3.1"\\ + --itr 5\\ + --tests "../tests/functional/modules/test_zos_tso_command_func.py"\\ + --user "ibmuser"\\ + --timeout 100 + + 2) Execute a multiple test suites for up to 10 iterations for ibmuser with shared zoau and python installations. + Note, usage of --tests "../tests/functional/modules/test_zos_tso_command_func.py,../tests/functional/modules/test_zos_find_func.py" + $ python3 ce.py\\ + --pyz "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz"\\ + --zoau "/zoau/v1.3.1"\\ + --itr 10\\ + --tests "../tests/functional/modules/test_zos_tso_command_func.py,../tests/functional/modules/test_zos_find_func.py"\\ + --user "ibmuser"\\ + --timeout 100 + + 3) Execute a test suites in a directory for up to 4 iterations for ibmuser with shared zoau and python installations. + Note, usage of --directories "../tests/functional/modules/,../tests/unit/" + $ python3 ce.py\\ + --pyz "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz"\\ + --zoau "/zoau/v1.3.1"\\ + --itr 4\\ + --directories "../tests/functional/modules/,../tests/unit/"\\ + --user "ibmuser"\\ + --timeout 100 + + 4) Execute test suites in multiple directories for up to 5 iterations for ibmuser with shared zoau and python installations. 
+ Note, usage of "--directories "../tests/functional/modules/,../tests/unit/" + $ python3 ce.py\\ + --pyz "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz"\\ + --zoau "/zoau/v1.3.1"\\ + --itr 5\\ + --directories "../tests/functional/modules/,../tests/unit/"\\ + --user "ibmuser"\\ + --timeout 100\\ + --max 6\\ + --bal 3 + + 5) Execute test suites in multiple directories with up to 5 iterations for ibmuser with attributes, zoau, pyz using a max timeout of 100, max failures of 6 and balance of 3. + Note, usage of "--directories "../tests/functional/modules/,../tests/unit/" + $ python3 ce.py\\ + --pyz "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz"\\ + --zoau "/zoau/v1.3.1"\\ + --itr 5\\ + --directories "../tests/functional/modules/,../tests/unit/"\\ + --user "ibmuser"\\ + --timeout 100\\ + --maxjob 6\\ + --bal 3\\ + --maxnode 4\\ + --hostnames "ec33025a.vmec.svl.ibm.com,ec33025a.vmec.svl.ibm"\\ + --verbosity 3\\ + --capture\\ + --workers 3\\ + --extra "cd .." + + python3 ce.py\\ + --pyz "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz"\\ + --zoau "/zoau/v1.3.1"\\ + --itr 3\\ + --paths "/Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_load_balance.py.py"\\ + --user "omvsadm"\\ + --extra "cd .."\\ + --maxnode 5\\ + --verbosity 1\\ + --no-capture\\ + --workers 1\\ + --maxjob 10\\ + --hostnames "ec01130a.vmec.svl.ibm.com"\\ + --timeout 300\\ + --replay 2\\ + --bal 2\\ + --volumes "222222,000000"\\ + --pythonpath "/zoau/v1.3.1/lib/3.10"\\ + --no-verbose\\ + --no-throttle + ''')) + + # Options + parser.add_argument('--extra', type=str, help='Extra commands passed to subprocess before pytest execution', required=False, metavar='', default="") + parser.add_argument('--pyz', type=str, help='Python Z home directory.', required=True, metavar='', default="/usr/lpp/python") + parser.add_argument('--zoau', type=str, help='ZOAU home directory.', required=True, metavar='', default="/usr/lpp/zoau") + parser.add_argument('--itr', type=int, help='How many iterations to run CE, each iteration runs only failed tests, exits early if there are no tests to run, default = 12.', required=True, metavar='', default="12") + parser.add_argument('--skip', type=str, help='Skip test suites.', required=False, metavar='', default="") + parser.add_argument('--user', type=str, help='Ansible user authorized to run tests on the managed node.', required=False, metavar='', default="") + parser.add_argument('--timeout', type=int, help='The maximum time in seconds a job should wait for completion, default = 300.', required=False, metavar='', default="300") + parser.add_argument('--maxjob', type=int, help='The maximum number of times a job can fail before its removed from the job queue.', required=False, metavar='', default="10") + parser.add_argument('--bal', type=int, help='The failure count at which a job is assigned to a new managed node, default = 5 .', required=False, metavar='', default="5") + parser.add_argument('--hostnames', help='List of managed nodes to use, overrides the auto detection, must be a comma delimited string.', required=False, metavar='', default=None, nargs='*') + parser.add_argument('--maxnode', type=int, help='The maximum number of test failures permitted for a managed node before the node is set to can fail to \'offline\' in the node queue, default = 10.', required=False, metavar='', default=10) + parser.add_argument('--verbosity', type=int, help='The pytest verbosity level to use, 1 = -v, 2 = -vv, 3 = -vvv, 4 = -vvvv, default = 0.', required=False, metavar='', default=0) + parser.add_argument('--capture', 
+    parser.add_argument('--workers', type=int, help='The numerical multiplier used to increase the number of worker threads, this is multiplied by the managed nodes to calculate the number of threads.', required=False, metavar='', default=1)
+    parser.add_argument('--replay', type=int, help='This value will instruct the tool to replay the entire command for only the failed test cases.', required=False, metavar='', default=1)
+    parser.add_argument('--pythonpath', type=str, help='Absolute path to the ZOAU Python modules, precompiled or wheels.', required=True, metavar='', default="")
+    parser.add_argument('--volumes', type=str, help='The volumes to use with the test cases, overrides the auto volume assignment.', required=False, metavar='', default="222222,000000")
+    parser.add_argument('--verbose', action=argparse.BooleanOptionalAction, help='Enables verbose stdout, default = --no-verbose.', required=False, default=False)
+    parser.add_argument('--throttle', action=argparse.BooleanOptionalAction, help='Enables managed node throttling such that a managed node will only execute one job at a time, no matter the threads, default = --throttle.', required=False, default=True)
+    parser.add_argument('--paths', type=str, help='Test paths.', required=True, metavar='', default="")
+    parser.add_argument('--returncode', action=argparse.BooleanOptionalAction, help='Print only the return code, --returncode, --no-returncode.', required=False, default=False)
+
+
+    args = parser.parse_args()
+    # A replay of 0 will result in no execution of CE.
+    if args.replay <= 0:
+        raise ValueError(f"Value '--replay' = {args.replay}, must be greater than or equal to 1.")
+
+    # If workers is > 1, throttle must be disabled, otherwise the workers would not be running concurrently.
+    if args.workers > 1:
+        args.throttle = False
+
+    # Evaluate
+    # Maxjob should always be less than itr else it makes no sense
+    # if int(args.maxjob) > int(args.itr):
+    #     raise ValueError(f"Value '--maxjob' = {args.maxjob}, must be less than --itr = {args.itr}, else maxjob will have no effect.")
+
+    if int(args.bal) > int(args.maxjob):
+        raise ValueError(f"Value '--bal' = {args.bal}, must be less than or equal to --maxjob = {args.maxjob}, else balance will have no effect.")
+
+    # Execute/begin running the concurrency testing with the provided args.
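+    # For example (an illustrative note): with --workers 3 and 2 managed nodes
+    # detected, 3 x 2 = 6 worker threads are created, and because --workers > 1
+    # disabled throttling above, those threads can dispatch jobs to the managed
+    # nodes concurrently.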
+ rc = execute(args) + if args.returncode: + print(rc) + return rc + +if __name__ == '__main__': + main() diff --git a/scripts/requirements-2.11.env b/scripts/configurations/requirements-2.11.env similarity index 100% rename from scripts/requirements-2.11.env rename to scripts/configurations/requirements-2.11.env diff --git a/scripts/requirements-2.12.env b/scripts/configurations/requirements-2.12.env similarity index 98% rename from scripts/requirements-2.12.env rename to scripts/configurations/requirements-2.12.env index 229e4edcb..4a0516fc4 100644 --- a/scripts/requirements-2.12.env +++ b/scripts/configurations/requirements-2.12.env @@ -25,7 +25,7 @@ requirements=( "ansible-core:2.12.10" "pylint" "rstcheck" -"ansible-lint:6.22.1" +"ansible-lint:24.7.0" ) python=( diff --git a/scripts/requirements-2.13.env b/scripts/configurations/requirements-2.13.env similarity index 98% rename from scripts/requirements-2.13.env rename to scripts/configurations/requirements-2.13.env index 4720e9352..7923ad23a 100644 --- a/scripts/requirements-2.13.env +++ b/scripts/configurations/requirements-2.13.env @@ -25,7 +25,7 @@ requirements=( "ansible-core:2.13.13" "pylint" "rstcheck" -"ansible-lint:6.22.1" +"ansible-lint:24.7.0" ) python=( diff --git a/scripts/requirements-2.14.env b/scripts/configurations/requirements-2.14.env similarity index 96% rename from scripts/requirements-2.14.env rename to scripts/configurations/requirements-2.14.env index 40a80dbf2..531ad47f2 100644 --- a/scripts/requirements-2.14.env +++ b/scripts/configurations/requirements-2.14.env @@ -22,10 +22,10 @@ # ============================================================================== requirements=( -"ansible-core:2.14.16" +"ansible-core:2.14.17" "pylint" "rstcheck" -"ansible-lint:6.22.1" +"ansible-lint:24.7.0" ) python=( diff --git a/scripts/requirements-2.15.env b/scripts/configurations/requirements-2.15.env similarity index 96% rename from scripts/requirements-2.15.env rename to scripts/configurations/requirements-2.15.env index 4ca546686..149819def 100644 --- a/scripts/requirements-2.15.env +++ b/scripts/configurations/requirements-2.15.env @@ -22,10 +22,10 @@ # ============================================================================== requirements=( -"ansible-core:2.15.11" +"ansible-core:2.15.12" "pylint" "rstcheck" -"ansible-lint:6.22.1" +"ansible-lint:24.7.0" ) python=( diff --git a/scripts/requirements-2.16.env b/scripts/configurations/requirements-2.16.env similarity index 96% rename from scripts/requirements-2.16.env rename to scripts/configurations/requirements-2.16.env index 050c27aca..77e8990ee 100644 --- a/scripts/requirements-2.16.env +++ b/scripts/configurations/requirements-2.16.env @@ -22,10 +22,10 @@ # ============================================================================== requirements=( -"ansible-core:2.16.3" +"ansible-core:2.16.9" "pylint" "rstcheck" -"ansible-lint:6.22.1" +"ansible-lint:24.7.0" ) python=( diff --git a/scripts/requirements-2.17.env b/scripts/configurations/requirements-2.17.env similarity index 96% rename from scripts/requirements-2.17.env rename to scripts/configurations/requirements-2.17.env index c61c03626..c0a7373db 100644 --- a/scripts/requirements-2.17.env +++ b/scripts/configurations/requirements-2.17.env @@ -22,10 +22,10 @@ # ============================================================================== requirements=( -"ansible-core:2.17.0b1" +"ansible-core:2.17.2" "pylint" "rstcheck" -"ansible-lint:6.22.2" +"ansible-lint:24.7.0" ) python=( diff --git 
a/scripts/requirements-2.9.env b/scripts/configurations/requirements-2.9.env
similarity index 100%
rename from scripts/requirements-2.9.env
rename to scripts/configurations/requirements-2.9.env
diff --git a/scripts/requirements-common.env b/scripts/configurations/requirements-common.env
similarity index 68%
rename from scripts/requirements-common.env
rename to scripts/configurations/requirements-common.env
index 5f76436bf..8c787701b 100644
--- a/scripts/requirements-common.env
+++ b/scripts/configurations/requirements-common.env
@@ -12,14 +12,20 @@
 # limitations under the License.
 # ==============================================================================
 
-# ==============================================================================
-# File name must adhere to reqs-common.sh and not change. This supplies the
-# venv's with additional packages for use by the developement work flow.
-# ==============================================================================
+# ------------------------------------------------------------------------------
+# Description: Supplies venvs with additional packages for use by the AC.
+# ------------------------------------------------------------------------------
+
+# ------------------------------------------------------------------------------
+# Note:
+# ------------------------------------------------------------------------------
+# (1) "pylint", "rstcheck", "six", "voluptuous", "yamllint" are common packages,
+#     but the requirements-xx.env files freeze them to their own specific needs.
+# (2) Package "cryptography:42.0.8" is frozen because of a warning message
+#     reported with a pending PR: https://github.com/paramiko/paramiko/issues/2419
+#     Once the above PR merges, the latest version can be evaluated.
+# ------------------------------------------------------------------------------
 
-# Notes, "pylint", "rstcheck", "six", "voluptuous", "yamllint" is common but
-# various requirements.txt have it frozen so it becomes a double requement
-# error if present here as well.
 requirements=(
 "alabaster"
 "ansible-builder"
@@ -39,7 +45,7 @@ requirements=(
 "cffi"
 "charset-normalizer"
 "click"
-"cryptography"
+"cryptography:42.0.8"
 "dill"
 "distlib"
 "distro"
@@ -116,13 +122,18 @@ requirements=(
 "webencodings"
 "wrapt"
 "zipp"
+"paramiko"
+"prettytable"
 )
 
-# This original list caused some issues with pytest seeing our conftest plugin
 # as already registered, the only time senstive solution I could come up with
 # was to pip freeze a working venv and use that as the common base for now, over
 # time, using pip show on each of these packages to figure out why
 # this occurs or maybe using pipdeptree will visually help.
+# -------------------- +# Reduced list below: +# -------------------- # requirements=( # "bandit" # "pipdeptree" diff --git a/scripts/requirements-latest.env b/scripts/configurations/requirements-latest.env similarity index 100% rename from scripts/requirements-latest.env rename to scripts/configurations/requirements-latest.env diff --git a/scripts/hosts.env b/scripts/hosts.env index 58075263d..0a5ccc70e 100644 --- a/scripts/hosts.env +++ b/scripts/hosts.env @@ -22,68 +22,68 @@ # fi # fi -host_list_str="ddimatos:ec33017a${HOST_SUFFIX}:${USER}:${PASS} "\ -"richp:ec01132a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ketan:ec33018a${HOST_SUFFIX}:${USER}:${PASS} "\ -"iamorenosoto:ec01134a${HOST_SUFFIX}:${USER}:${PASS} "\ -"fernando:ec01135a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01105a:ec01105a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01129a:ec01129a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01130a:ec01130a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01131a:ec01131a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01132a:ec01132a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01133a:ec01133a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01134a:ec01134a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01135a:ec01135a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01136a:ec01136a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01137a:ec01137a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01138a:ec01138a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01139a:ec01139a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01140a:ec01140a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01145a:ec01145a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01146a:ec01146a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01147a:ec01147a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01148a:ec01148a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01149a:ec01149a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01150a:ec01150a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01151a:ec01151a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01152a:ec01152a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01153a:ec01153a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec01154a:ec01154a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec03071a:ec03071a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec03102a:ec03102a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec03127a:ec03127a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec03129a:ec03129a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec03173a:ec03173a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec03175a:ec03175a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec32016a:ec32016a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec32024a:ec32024a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec32051a:ec32051a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33002a:ec33002a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33003a:ec33003a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33004a:ec33004a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33005a:ec33005a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33006a:ec33006a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33006a:ec33006a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33007a:ec33007a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33008a:ec33008a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33009a:ec33009a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33010a:ec33010a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33011a:ec33011a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33012a:ec33012a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33013a:ec33013a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33013a:ec33013a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33014a:ec33014a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33015a:ec33015a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33016a:ec33016a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33017a:ec33017a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33018a:ec33018a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33019a:ec33019a${HOST_SUFFIX}:${USER}:${PASS} "\ 
-"ec33020a:ec33020a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33021a:ec33021a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33022a:ec33022a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33023a:ec33023a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33024a:ec33024a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33025a:ec33025a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33026a:ec33026a${HOST_SUFFIX}:${USER}:${PASS} "\ -"ec33027a:ec33027a${HOST_SUFFIX}:${USER}:${PASS} " +host_list_str="ddimatos:ec33017a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"richp:ec01132a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ketan:ec33018a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"iamorenosoto:ec01134a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"fernando:ec01135a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01105a:ec01105a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01129a:ec01129a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01130a:ec01130a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec01131a:ec01131a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01132a:ec01132a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec01133a:ec01133a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01134a:ec01134a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01135a:ec01135a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01136a:ec01136a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01137a:ec01137a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01138a:ec01138a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01139a:ec01139a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01140a:ec01140a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01145a:ec01145a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01146a:ec01146a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01147a:ec01147a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01148a:ec01148a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01149a:ec01149a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec01150a:ec01150a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01151a:ec01151a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec01152a:ec01152a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec01153a:ec01153a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec01154a:ec01154a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec03071a:ec03071a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec03102a:ec03102a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec03127a:ec03127a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec03129a:ec03129a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec03173a:ec03173a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec03175a:ec03175a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec32016a:ec32016a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec32024a:ec32024a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec32051a:ec32051a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33002a:ec33002a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33003a:ec33003a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33004a:ec33004a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33005a:ec33005a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33006a:ec33006a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33006a:ec33006a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33007a:ec33007a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33008a:ec33008a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33009a:ec33009a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33010a:ec33010a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33011a:ec33011a${HOST_SUFFIX}:${USER}:${PASS}:development "\ 
+"ec33012a:ec33012a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33013a:ec33013a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33013a:ec33013a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33014a:ec33014a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33015a:ec33015a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33016a:ec33016a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33017a:ec33017a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33018a:ec33018a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33019a:ec33019a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33020a:ec33020a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33021a:ec33021a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33022a:ec33022a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec33023a:ec33023a${HOST_SUFFIX}:${USER}:${PASS}:development "\ +"ec33024a:ec33024a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec33025a:ec33025a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec33026a:ec33026a${HOST_SUFFIX}:${USER}:${PASS}:production "\ +"ec33027a:ec33027a${HOST_SUFFIX}:${USER}:${PASS}:development " diff --git a/scripts/info.env.axx b/scripts/info.env.axx index b2e6308fb..c756e6dfe 100755 --- a/scripts/info.env.axx +++ b/scripts/info.env.axx @@ -1,15 +1,15 @@ -U2FsdGVkX18VoSEji2kkFCFNcDHC1mzJ+hUulvTheU5dL9E/lmDWS6qdk8R1VCPJ -WyRU1Zefxvc1fw/sqvmzliUgBXXN6dOgRv73+ap8vyp+IvUhCVAZl9efFXHZ2Eag -6loROID0Qq28Bd+5Btqk/JuC6az9QvnV1E4MRhmZBtCIJ8P/joXKIigR9KHGvL0N -7PpA20UxvMzSH/vQSFd0zkuuvjAAzxN/AVO3W0Jbw1fmHy0gqp4TxidqXF0JatdC -YtDadHqyGHCid3hDP0+GwS4yCSEL/uNEE1e3Npe5EF52YB1OE5y7WqJFmQT1OdNd -pkpPok73YNyPtetMBzIr6t3BcnXHL1j38lrDcMZvBy9RWQ2LQiSxmRiGanEg+i9L -SBapVYDJJ64eKZd7T7gY4gViytT0/i6IAqgGqoH0Dk9LQnGmQ7bOqi34zOna/iC2 -PFzx8XFZF/BmXQm3/96xJsRv15IMKCRp2t9lha0N/FRVmEYp7n5loi6oj5hCtD5k -CV1nbzO9cvMH1c85LUeWjTfcEmTA0criSCiBY3zLywrBvs6XsV6EkITMjPh1K2ht -AHXVPykPHhG6+F0LPYS4gasc0jLRTCxVyPRrl3tSf5aGvvo7ilsZrUtVh2UKUkuN -bjpUHCsrsV17LZUb5fWbY3B0EB1NxHa2rO3cb0ausUd+Mf+02SlnPsnaxjX7lTna -ymUlYs6oQcfAfhHM1mCf8miS4ES2HBdl9Urk9BiepSRJudoaBjIL/L9IsaInYpdv -BfW04gocwKJOhhGUE5ql4+DBfoCrWbz4bIGlUSfEIdFiRmsHG8723JQXgq19c4il -oFe7inTT14QHNsI7JNWmDDxsBPkItgJJ00JR+WwZd77jDTHJhlXuf8lYevQCRKla -BDZ3DlqvbK2ILBWFz6XTjPdlNu2fYsxlW4R5kLKsTyI= +U2FsdGVkX18M4UEJJX9P8cXv2ySLpbNCQ4Jf2++13npBAMHEIow8TQCKBeyiBfTa +lk6ALYivmT0ktGLtvS+/zEIBnvh1Gq2kH3GO7AQkg6JQxla8EpS9b7RpFCNW0XpY +f6+sSBpUxIqCSlE7mufKxIPMe3lNVL7F2eRqiOj87kul6zoMrOp+FSQ/y8pF97+H +xd35tygalQmsGUYdZ9F143/cYUy3t5m85AUJOD/R6a2u7OFlhLcqWYhw9kzduThA +sauoYKeiNlnRJ5mNq87tQXWCYWquxBk+TXrQXZ2qxmR0Njz3q2aIPcPZzxyK5dNu +8DdO0Ya5qAVWD685CnHrpJ9lbDGkUyEz6qXkaNdUkxsgvWHCQIwZlRfe+Tpggt9M +D2D/q06zoHmsJ3cnfpSLFSjupLoc56wmMe2dmK5kDKvKqnqFi546Du9+/xq22X+x +gwg3/S3fU2CVp89dQKWazpR9U5k7FAZzZ2lyZ3ZpPknAOoNWM+sgaczJbkKjnIeV +7agLgTzasD/bNyIOmM9NNGYEN4AHHV8iKxpbr7swl4Kfjn+l+DBidGd/L/I5Mtu6 +USiVkOF5LQzqi+Dmdf6Yk8CsRp8wh+hFCVTp5oFs0oirSqEv/BXdWWUPfER5yZUn +K+HvjElcMK3nrvPb3SfdhmonvQsRbH80Ju5i3/vWWzA9+WN88aEak4shjG1j51gU +q0Asm3qHtb0CdsFJwNMbwR8gelhUZWErC6o/APcCYwTp364Up4aIrULfsBG4CwTf +fsvqzAJIoiV2vF6wgYUC3gOzbhLWLwlUPTbS0z4xbYt36uhSniUv868c1FfTNhcm +D1o8OGq2yiJj65jHq038TjLfRkpc1ov3CRWSrYfved5U7dLBgGOZ4dBOhtCRBMn7 +/pi6FVPc8HIRFlO6ubN9UIv54MGBItJT1T+7Ie4HQTw= diff --git a/scripts/modules/connection.py b/scripts/modules/connection.py new file mode 100644 index 000000000..30dbcdd7e --- /dev/null +++ b/scripts/modules/connection.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) IBM Corporation 2024 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Connection class wrapping paramiko. The wrapper provides methods allowing
+for remote environment variables to be set up so that they can interact with
+ZOAU. Generally this is a wrapper around paramiko that is used to write files
+to z/OS. For example, consider writing a file that is managed by this load
+balancer, indicating whether there is another test running. The idea was to
+update our pytest fixture to look for that file so that a remote pipeline or
+other instance does not try to run concurrently until the functional tests
+can be properly vetted out for concurrent support.
+Usage:
+    key_file_name = os.path.expanduser('~') + "/.ssh/id_dsa"
+    connection = Connection(hostname="ibm.com", username="root", key_filename=key_file_name)
+    client = connection.connect()
+    result = connection.execute(client, "ls")
+    print(result)
+"""
+
+# pylint: disable=too-many-instance-attributes, too-many-arguments
+
+from socket import error
+from paramiko import SSHClient, AutoAddPolicy, BadHostKeyException, \
+    AuthenticationException, SSHException, ssh_exception
+
+class Connection:
+    """
+    Connection class wrapping paramiko. The wrapper provides methods allowing
+    for remote environment variables to be set up so that they can interact with
+    ZOAU. Generally this is a wrapper around paramiko that is used to write files
+    to z/OS. For example, consider writing a file that is managed by this load
+    balancer, indicating whether there is another test running. The idea was to
+    update our pytest fixture to look for that file so that a remote pipeline or
+    other instance does not try to run concurrently until the functional tests
+    can be properly vetted out for concurrent support.
+
+    Usage:
+        key_file_name = os.path.expanduser('~') + "/.ssh/id_dsa"
+        connection = Connection(hostname="ibm.com", username="root", key_filename=key_file_name)
+        client = connection.connect()
+        result = connection.execute(client, "ls")
+        print(result)
+    """
+
+    def __init__(self, hostname, username, password=None, key_filename=None,
+                 passphrase=None, port=22, environment=None):
+        self._hostname = hostname
+        self.port = port
+        self._username = username
+        self.password = password
+        self.key_filename = key_filename
+        self.passphrase = passphrase
+        self.environment = environment
+        self.env_str = ""
+        if self.environment is not None:
+            self.env_str = self.set_environment_variable(**self.environment)
+
+
+    def __to_dict(self) -> dict:
+        """
+        Returns the constructor arguments as a dictionary; must remain private to
+        protect credentials.
+        """
+        temp = {
+            "hostname": self._hostname,
+            "port": self.port,
+            "username": self._username,
+            "password": self.password,
+            "key_filename": self.key_filename,
+            "passphrase": self.passphrase,
+        }
+
+        for k, v in dict(temp).items():
+            if v is None:
+                del temp[k]
+        return temp
+
+    def connect(self) -> SSHClient:
+        """
+        Create the connection after the connection class has been initialized.
+
+        Returns:
+            SSHClient: The paramiko SSHClient used for the execution of commands.
+
+        Raises:
+            BadHostKeyException
+            AuthenticationException
+            SSHException
+            FileNotFoundError
+            error
+        """
+        ssh = None
+
+        n = 0
+        while n <= 10:
+            n += 1
+            try:
+                ssh = SSHClient()
+                ssh.set_missing_host_key_policy(AutoAddPolicy())
+                ssh.connect(**self.__to_dict(), disabled_algorithms=
+                            {'pubkeys': ['rsa-sha2-256', 'rsa-sha2-512']})
+                # Connection succeeded, stop retrying.
+                break
+            except BadHostKeyException as e:
+                print('Host key could not be verified.', str(e))
+                raise e
+            except AuthenticationException as e:
+                print('Authentication failed.', str(e))
+                raise e
+            except ssh_exception.SSHException as e:
+                print(e, str(e))
+                raise e
+            except FileNotFoundError as e:
+                print('Missing key filename.', str(e))
+                raise e
+            except error as e:
+                print('Socket error occurred while connecting.', str(e))
+                raise e
+        return ssh
+
+    def execute(self, client, command):
+        """
+        Parameters:
+            client (paramiko SSHClient): SSH client created through connection.connect().
+            command (str): Command to run.
+
+        Returns:
+            dict: A dictionary with stdout, stderr and the command executed.
+
+        Raises:
+            SSHException
+        """
+
+        response = None
+        get_pty_bool = True
+        out = ""
+        try:
+            # We may need to create a channel and make this synchronous
+            # but get_pty should help avoid having to do that
+            (_, stdout, stderr) = client.exec_command(self.env_str+command, get_pty=get_pty_bool)
+
+            if get_pty_bool is True:
+                out = stdout.read().decode().strip('\r\n')
+                error_msg = stderr.read().decode().strip('\r\n')
+            else:
+                out = stdout.read().decode().strip('\n')
+                error_msg = stderr.read().decode().strip('\n')
+
+            # Don't shutdown stdin, we are reusing this connection in the services instance
+            # client.get_transport().open_session().shutdown_write()
+
+            response = {'stdout': out,
+                        'stderr': error_msg,
+                        'command': command
+                        }
+
+        except SSHException as e:
+            # If there was any other error connecting or establishing an SSH session
+            print(e)
+        finally:
+            client.close()
+
+        return response
+
+    def set_environment_variable(self, **kwargs):
+        """
+        Provide the connection with environment variables needed to be exported
+        such as ZOAU env vars.
+
+        Example:
+            env={"_BPXK_AUTOCVT":"ON",
+                "ZOAU_HOME":"/zoau/v1.2.0f",
+                "PATH":"/zoau/v1.2.0f/bin:/python/usr/lpp/IBM/cyp/v3r8/pyz/bin:/bin:.",
+                "LIBPATH":"/zoau/v1.2.0f/lib:/lib:/usr/lib:.",
+                "PYTHONPATH":"/zoau/v1.2.0f/lib",
+                "_CEE_RUNOPTS":"FILETAG(AUTOCVT,AUTOTAG) POSIX(ON)",
+                "_TAG_REDIR_ERR":"txt",
+                "_TAG_REDIR_IN":"txt",
+                "_TAG_REDIR_OUT":"txt",
+                "LANG":"C"
+            }
+            connection = Connection(hostname="ibm.com", username="root",
+                            key_filename=key_filename, environment=env)
+
+        """
+        env_vars = ""
+        export = "export"
+        if kwargs is not None:
+            for key, value in kwargs.items():
+                env_vars = f"{env_vars}{export} {key}=\"{value}\";"
+        return env_vars
diff --git a/scripts/modules/utils.py b/scripts/modules/utils.py
new file mode 100644
index 000000000..4315cc9c4
--- /dev/null
+++ b/scripts/modules/utils.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) IBM Corporation 2024
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +""" +Utility classes +""" + +# pylint: disable=too-many-locals, modified-iterating-list, too-many-nested-blocks +# pylint: disable=too-many-branches, too-many-statements, line-too-long + +from pathlib import Path +import subprocess + + +def get_test_cases(paths: str, skip: str = None) -> list[str]: + """ + Returns a list of test cases suitable for pytest to execute. Can discover test + cases from either a directory of test suites or a list of test suites. Will also + remove any skipped tests if specified , wether a directory or a specific test. + + Parameters: + paths (str): Absolute path of directories containing test suites or absolute + path of individual test suites comma or space delimited. + A directory of test cases is such that it contains test suites. + A test suite is a collection of test cases in a file that starts with + 'test' and ends in '.py'. + skip (str): (Optional) Absolute path of either test suites, or test cases. + Test cases can be parametrized such they use the '::' syntax or not. + Skip does not support directories. + + Returns: + list[str] A list of strings containing a modified path to each test suite. + The absolute path is truncated to meet the needs of pytest which starts at + the `tests` directory. + + Raises: + FileNotFoundError : If a test suite, test case or skipped test cannot be found. + ValueError: If paths is not provided. + + Examples: + Example collects all test cases for test suites`test_zos_job_submit_func.py` , `test_zos_copy_func.py`, all + unit tests in directory `tests/unit/` then skips all tests in test suite `test_zos_copy_func.py` + (for demonstration) and parametrized tests `test_zos_backup_restore_unit.py::test_invalid_operation[restorE]` + and test_zoau_version_checker_unit.py::test_is_zoau_version_higher_than[True-sys_zoau1-1.2.1]/ + - get_test_cases(paths="/Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_job_submit_func.py,\\ + /Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_copy_func.py,\\ + /Users/ddimatos/git/gh/ibm_zos_core/tests/unit/",\\ + skip="/Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_copy_func.py,\\ + /Users/ddimatos/git/gh/ibm_zos_core/tests/unit/test_zos_backup_restore_unit.py::test_invalid_operation[restorE],\\ + /Users/ddimatos/git/gh/ibm_zos_core/tests/unit/test_zoau_version_checker_unit.py::test_is_zoau_version_higher_than[True-sys_zoau1-1.2.1]") + """ + + files =[] + parametrized_test_cases = [] + parametrized_test_cases_filtered_test_suites = [] + parametrized_test_cases_filtered_test_cases = [] + parameterized_tests = [] + ignore_test_suites = [] + ignore_test_cases = [] + + # Remove whitespace and replace CSV with single space delimiter. + # Build a command that will yield all test cases including parametrized tests. 
+    cmd = ['pytest', '--collect-only', '-q']
+    if paths:
+        files = " ".join(paths.split())
+        files = files.strip().replace(',', ' ').split()
+
+        for file in files:
+            file_path = Path(file)
+            try:
+                file_path.resolve(strict=True)
+            except FileNotFoundError as e:
+                raise FileNotFoundError(f'{file_path} does not exist.') from e
+        cmd.extend(files)
+    else:
+        raise ValueError("Required files have not been provided.")
+
+    cmd.append('| grep ::')
+    cmd_str = ' '.join(cmd)
+
+    # Run the pytest collect-only command and grep on '::' so as to avoid warnings.
+    parametrized_test_cases = subprocess.run([cmd_str], shell=True, capture_output=True, text=True, check=False)
+    # Remove duplicates in case test suites or test directories were repeated.
+    parametrized_test_cases = list(set(parametrized_test_cases.stdout.split('\n')))
+    # Remove the trailing line feed from the list, else it will leave an empty list index and error.
+    parametrized_test_cases = list(filter(None, parametrized_test_cases))
+
+    # Skip can take any input, but note that test suites, which start with 'test' and end in '.py',
+    # supersede individual test cases. That is because if a test suite is being skipped,
+    # it should remove all test cases that match that test suite; hence the skipped entries are put
+    # into two buckets, 'ignore_test_cases' and 'ignore_test_suites', and 'ignore_test_suites' is
+    # evaluated first.
+    if skip:
+        skip = " ".join(skip.split())
+        skip = skip.strip().replace(',', ' ').split()
+        for skipped in skip:
+            if '::' in skipped:  # It's a test case.
+                skipped_path = Path(skipped.split('::')[0])
+                try:
+                    skipped_path.resolve(strict=True)
+                except FileNotFoundError as e:
+                    raise FileNotFoundError(f'{skipped_path} does not exist.') from e
+                # Only retain the sub-str because that is what pytest collect-only will yield.
+                skipped = skipped.split("tests/")[1]
+                ignore_test_cases.append(skipped)
+
+            elif skipped.endswith('.py'):  # It's a test suite.
+                skipped_path = Path(skipped)
+                try:
+                    skipped_path.resolve(strict=True)
+                except FileNotFoundError as e:
+                    raise FileNotFoundError(f'{skipped_path} does not exist.') from e
+                # Only retain the sub-str because that is what pytest collect-only will yield.
+                skipped = skipped.split("tests/")[1]
+                ignore_test_suites.append(skipped)
+
+    # pytest --ignore/--deselect did not work as expected, so the functionality is replicated
+    # manually. A match on ignore_test_suites supersedes ignore_test_cases substrings, so the
+    # suite filter is applied first and the test case filter is applied to its result.
+    if len(ignore_test_suites) > 0 or len(ignore_test_cases) > 0:
+        parametrized_test_cases_filtered_test_suites = [p for p in parametrized_test_cases if all(t not in p for t in ignore_test_suites)]
+        parametrized_test_cases_filtered_test_cases = [p for p in parametrized_test_cases_filtered_test_suites if all(t not in p for t in ignore_test_cases)]
+        parameterized_tests = [f"tests/{parametrized}" for parametrized in parametrized_test_cases_filtered_test_cases]
+        return parameterized_tests
+
+    parameterized_tests = [f"tests/{parametrized}" for parametrized in parametrized_test_cases]
+
+    return parameterized_tests
+
+# Some adhoc testing until some test cases can be structured.
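+# For reference, a minimal usage sketch (the repository paths are illustrative):
+#
+#     test_cases = get_test_cases(
+#         paths="/u/usr/ibm_zos_core/tests/unit/",
+#         skip="/u/usr/ibm_zos_core/tests/unit/test_zos_backup_restore_unit.py")
+#     # -> ['tests/unit/test_zoau_version_checker_unit.py::test_is_zoau_version_higher_than[...]', ...]
+#
+# The returned node IDs are rooted at 'tests/' and can be passed directly to a
+# pytest invocation.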
+# def main(): +# print("Main") +# # plist = get_test_cases(paths="/Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_job_submit_func.py,\ +# # /Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_copy_func.py,\ +# # /Users/ddimatos/git/gh/ibm_zos_core/tests/unit/",\ +# # skip="/Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_copy_func.py,\ +# # /Users/ddimatos/git/gh/ibm_zos_core/tests/unit/test_zos_backup_restore_unit.py::test_invalid_operation[restorE],\ +# # /Users/ddimatos/git/gh/ibm_zos_core/tests/unit/test_zoau_version_checker_unit.py::test_is_zoau_version_higher_than[True-sys_zoau1-1.2.1]") +# # plist = get_test_cases(paths="/Users/ddimatos/git/gh/ibm_zos_core/tests/unit/") +# plist = get_test_cases(paths="/Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_tso_command_func.py,/Users/ddimatos/git/gh/ibm_zos_core/tests/functional/modules/test_zos_operator_func.py") +# print(str(plist)) +# if __name__ == '__main__': +# main() diff --git a/scripts/mounts.sh b/scripts/mounts.sh index a244bc6d6..765b57714 100755 --- a/scripts/mounts.sh +++ b/scripts/mounts.sh @@ -1,6 +1,6 @@ #!/bin/sh # ============================================================================== -# Copyright (c) IBM Corporation 2022, 2023 +# Copyright (c) IBM Corporation 2022, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -480,6 +480,10 @@ get_python_mount(){ fi } +get_python_mount_echo(){ + get_python_mount $1 + echo "${PYZ_HOME}" +} # Get the zoau home/path given $1/arg else error get_zoau_mount(){ @@ -507,6 +511,11 @@ get_zoau_mount(){ fi } +get_zoau_mount_echo(){ + get_zoau_mount $1 + echo "${ZOAU_HOME}" +} + # ============================================================================== # ********************* Print functions ********************* # ============================================================================== @@ -619,6 +628,12 @@ _test_arrays(){ # Main arg parser ################################################################################ case "$1" in + --get-python-mount) + get_python_mount_echo $2 + ;; + --get-zoau-mount) + get_zoau_mount_echo $2 + ;; --mount) mount "-r -t zfs -f" ;; @@ -653,4 +668,3 @@ case "$1" in fi fi esac - diff --git a/scripts/tests/test_load_balance.py b/scripts/tests/test_load_balance.py new file mode 100644 index 000000000..ea6b928e6 --- /dev/null +++ b/scripts/tests/test_load_balance.py @@ -0,0 +1,320 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) IBM Corporation 2024 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
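+# Synthetic workload used to exercise the load balancer (ce.py's example above
+# references this suite): test_case_1 through test_case_91 always pass,
+# test_case_92 through test_case_97 are intentionally flaky (random.randrange(7, 9)
+# yields 7 or 8, so the assertion passes roughly half the time), and
+# test_case_98 through test_case_100 always fail, giving the balancer a mix of
+# stable, flaky and permanently failing jobs.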
+ +from __future__ import absolute_import, division, print_function +import random +import os +import sys + +__metaclass__ = type + +def test_case_1(): + assert 1 == 1 + +def test_case_2(): + assert 2 == 2 + +def test_case_3(): + assert 3 == 3 + +def test_case_4(): + assert 4 == 4 + +def test_case_5(): + assert 5 == 5 + +def test_case_6(): + assert 6 == 6 + +def test_case_7(): + assert 7 == 7 + +def test_case_8(): + assert 8 == 8 + +def test_case_9(): + assert 9 == 9 + +def test_case_10(): + assert 10 == 10 + +def test_case_11(): + assert 11 == 11 + +def test_case_12(): + assert 12 == 12 + +def test_case_13(): + print("FOO") + assert 13 == 13 + +def test_case_14(): + assert 14 == 14 + +def test_case_15(): + assert 15 == 15 + +def test_case_16(): + assert 16 == 16 + +def test_case_17(): + assert 17 == 17 + +def test_case_18(): + assert 18 == 18 + +def test_case_19(): + assert 19 == 19 + +def test_case_20(): + assert 20 == 20 + +def test_case_21(): + assert 21 == 21 + +def test_case_22(): + assert 22 == 22 + +def test_case_23(): + assert 23 == 23 + +def test_case_24(): + assert 24 == 24 + +def test_case_25(): + assert 25 == 25 + +def test_case_26(): + assert 26 == 26 + +def test_case_27(): + assert 27 == 27 + +def test_case_28(): + assert 28 == 28 + +def test_case_29(): + assert 29 == 29 + +def test_case_30(): + assert 30 == 30 + +def test_case_31(): + assert 31 == 31 + +def test_case_32(): + assert 32 == 32 + +def test_case_33(): + assert 33 == 33 + +def test_case_34(): + assert 34 == 34 + +def test_case_35(): + assert 35 == 35 + +def test_case_36(): + assert 36 == 36 + +def test_case_37(): + assert 37 == 37 + +def test_case_38(): + assert 38 == 38 + +def test_case_39(): + assert 39 == 39 + +def test_case_40(): + assert 40 == 40 + +def test_case_41(): + assert 41 == 41 + +def test_case_42(): + assert 42 == 42 + +def test_case_43(): + assert 43 == 43 + +def test_case_44(): + assert 44 == 44 + +def test_case_45(): + assert 45 == 45 + +def test_case_46(): + assert 46 == 46 + +def test_case_47(): + assert 47 == 47 + +def test_case_48(): + assert 48 == 48 + +def test_case_49(): + assert 49 == 49 + +def test_case_50(): + assert 50 == 50 + +def test_case_51(): + assert 51 == 51 + +def test_case_52(): + assert 52 == 52 + +def test_case_53(): + assert 53 == 53 + +def test_case_54(): + assert 54 == 54 + +def test_case_55(): + assert 55 == 55 + +def test_case_56(): + assert 56 == 56 + +def test_case_57(): + assert 57 == 57 + +def test_case_58(): + assert 58 == 58 + +def test_case_59(): + assert 59 == 59 + +def test_case_60(): + assert 60 == 60 + +def test_case_61(): + assert 61 == 61 + +def test_case_62(): + assert 62 == 62 + +def test_case_63(): + assert 63 == 63 + +def test_case_64(): + assert 64 == 64 + +def test_case_65(): + assert 65 == 65 + +def test_case_66(): + assert 66 == 66 + +def test_case_67(): + assert 67 == 67 + +def test_case_68(): + assert 68 == 68 + +def test_case_69(): + assert 69 == 69 + +def test_case_70(): + assert 70 == 70 + +def test_case_71(): + assert 71 == 71 + +def test_case_72(): + assert 72 == 72 + +def test_case_73(): + assert 73 == 73 + +def test_case_74(): + assert 74 == 74 + +def test_case_75(): + assert 75 == 75 + +def test_case_76(): + assert 76 == 76 + +def test_case_77(): + assert 77 == 77 + +def test_case_78(): + assert 78 == 78 + +def test_case_79(): + assert 79 == 79 + +def test_case_80(): + assert 80 == 80 + +def test_case_81(): + assert 81 == 81 + +def test_case_82(): + assert 82 == 82 + +def test_case_83(): + assert 83 == 83 + +def test_case_84(): + assert 
84 == 84 + +def test_case_85(): + assert 85 == 85 + +def test_case_86(): + assert 86 == 86 + +def test_case_87(): + assert 87 == 87 + +def test_case_88(): + assert 88 == 88 + +def test_case_89(): + assert 89 == 89 + +def test_case_90(): + assert 90 == 90 + +def test_case_91(): + assert 91 == 91 + +def test_case_92(): + assert 8 == random.randrange(7, 9) + +def test_case_93(): + assert 8 == random.randrange(7, 9) + +def test_case_94(): + assert 8 == random.randrange(7, 9) + +def test_case_95(): + assert 8 == random.randrange(7, 9) + +def test_case_96(): + assert 8 == random.randrange(7, 9) + +def test_case_97(): + assert 8 == random.randrange(7, 9) + +def test_case_98(): + assert 98 == -1 + +def test_case_99(): + assert 99 == -1 + +def test_case_100(): + assert 100 == -1 diff --git a/scripts/venv.sh b/scripts/venv.sh index 45c3d130e..3b662536b 100755 --- a/scripts/venv.sh +++ b/scripts/venv.sh @@ -81,7 +81,7 @@ echo_requirements(){ unset requirements_common unset requirements - requirements_common="requirements-common.env" + requirements_common="configurations/requirements-common.env" unset REQ_COMMON if [ -f "$requirements_common" ]; then @@ -103,9 +103,7 @@ echo_requirements(){ fi done - # for file in `ls requirements-*.sh`; do - # for file in `ls requirements-[0-9].[0-9]*.env`; do - for file in `ls *requirements-[0-9].[0-9]*.env* *requirements-latest* 2>/dev/null`; do + for file in `ls configurations/*requirements-[0-9].[0-9]*.env* configurations/*requirements-latest* 2>/dev/null`; do # Unset the vars from any prior sourced files unset REQ unset requirements @@ -118,11 +116,11 @@ echo_requirements(){ fi if [[ "$file" =~ "latest" ]]; then - # eg extract 'latest' from requirements-latest file name + # eg extract 'latest' from configurations/requirements-latest file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1` venv_name="venv"-$ansible_version else - # eg extract 2.14 from requirements-2.14.sh file name + # eg extract 2.14 from configurations/requirements-2.14.sh file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1,2` venv_name="venv"-$ansible_version #echo $venv_name @@ -169,14 +167,13 @@ make_venv_dirs(){ # We should think about the idea of allowing: # --force, --synch, --update thus not sure we need this method and better to # manage this logic inline to write_req - # for file in `ls requirements-[0-9].[0-9]*.env`; do - for file in `ls *requirements-[0-9].[0-9]*.env* *requirements-latest* 2>/dev/null`; do + for file in `ls configurations/*requirements-[0-9].[0-9]*.env* configurations/*requirements-latest* 2>/dev/null`; do if [[ "$file" =~ "latest" ]]; then - # eg extract 'latest' from requirements-latest file name + # eg extract 'latest' from configurations/requirements-latest file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1` venv_name="venv"-$ansible_version else - # eg extract 2.14 from requirements-2.14.sh file name + # eg extract 2.14 from configurations/requirements-2.14.sh file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1,2` venv_name="venv"-$ansible_version #echo $venv_name @@ -191,7 +188,7 @@ write_requirements(){ unset requirements unset REQ unset REQ_COMMON - requirements_common_file="requirements-common.env" + requirements_common_file="configurations/requirements-common.env" # Source the requirements file for now, easy way to do this. Exit may not # not be needed but leave it for now. 
@@ -214,9 +211,7 @@ write_requirements(){ fi done - # for file in `ls requirements-*.sh`; do - # for file in `ls requirements-[0-9].[0-9]*.env`; do - for file in `ls *requirements-[0-9].[0-9]*.env* *requirements-latest* 2>/dev/null`; do + for file in `ls configurations/*requirements-[0-9].[0-9]*.env* configurations/*requirements-latest* 2>/dev/null`; do # Unset the vars from any prior sourced files unset REQ unset requirements @@ -229,12 +224,12 @@ write_requirements(){ fi if [[ "$file" =~ "latest" ]]; then - # eg extract 'latest' from requirements-latest file name + # eg extract 'latest' from configurations/requirements-latest file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1` venv_name="venv"-$ansible_version echo $venv_name else - # eg extract 2.14 from requirements-2.14.sh file name + # eg extract 2.14 from configurations/requirements-2.14.sh file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1,2` venv_name="venv"-$ansible_version echo $venv_name @@ -288,6 +283,9 @@ write_requirements(){ cp hosts.env "${VENV_HOME_MANAGED}"/"${venv_name}"/ cp venv.sh "${VENV_HOME_MANAGED}"/"${venv_name}"/ cp profile.sh "${VENV_HOME_MANAGED}"/"${venv_name}"/ + cp ../tests/dependencyfinder.py "${VENV_HOME_MANAGED}"/"${venv_name}"/ + cp ce.py "${VENV_HOME_MANAGED}"/"${venv_name}"/ + cp -R modules "${VENV_HOME_MANAGED}"/"${venv_name}"/ # Decrypt file if [ "$option_pass" ]; then @@ -318,16 +316,15 @@ write_requirements(){ create_venv_and_pip_install_req(){ - # for file in `ls requirements-[0-9].[0-9]*.env`; do - for file in `ls *requirements-[0-9].[0-9]*.env* *requirements-latest* 2>/dev/null`; do + for file in `ls configurations/*requirements-[0-9].[0-9]*.env* configurations/*requirements-latest* 2>/dev/null`; do unset venv if [[ "$file" =~ "latest" ]]; then - # eg extract 'latest' from requirements-latest file name + # eg extract 'latest' from configurations/requirements-latest file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1` venv_name="venv"-$ansible_version else - # eg extract 2.14 from requirements-2.14.sh file name + # eg extract 2.14 from configurations/requirements-2.14.sh file name ansible_version=`echo $file | cut -d"-" -f2|cut -d"." -f1,2` venv_name="venv"-$ansible_version #echo $venv_name @@ -382,7 +379,7 @@ discover_python(){ # for python_found in `which python3 | cut -d" " -f3`; do # # The 'pys' array will search for pythons in reverse order, once it finds one that matches - # the requirements-x.xx.env it does not continue searching. Reverse order is important to + # the configurations/requirements-x.xx.env it does not continue searching. Reverse order is important to # maintain. 
pys=("python3.14" "python3.13" "python3.12" "python3.11" "python3.10" "python3.9" "python3.8") rc=1 @@ -536,6 +533,39 @@ get_host_ids(){ done } +get_host_ids_production(){ + set_hosts_to_array + unset host_index + unset host_prefix + unset host_production + first_entry=true + for tgt in "${HOSTS_ALL[@]}" ; do + host_index=`echo "${tgt}" | cut -d ":" -f 1` + host_prefix=`echo "${tgt}" | cut -d ":" -f 2` + host_production=`echo "${tgt}" | cut -d ":" -f 5` + if [ "$host_production" == "production" ];then + if [ "$first_entry" == "true" ];then + first_entry=false + echo "$host_prefix" + else + echo " $host_prefix" + fi + fi + done +} + + first_entry=true + skip_tests="" + for i in $(echo $skip | sed "s/,/ /g") + do + if [ "$first_entry" == "true" ];then + first_entry=false + skip_tests="$CURR_DIR/tests/functional/modules/$i" + else + skip_tests="$skip_tests $CURR_DIR/tests/functional/modules/$i" + fi + done + # Should renane this with a prefix of set_ to make it more readable ssh_host_credentials(){ arg=$1 @@ -647,6 +677,18 @@ case "$1" in ssh_host_credentials $2 ssh_copy_key ;; +--host-credentials) + ssh_host_credentials $2 + echo "$host" + ;; +--user-credentials) + ssh_host_credentials $2 + echo "$user" + ;; +--pass-credentials) + ssh_host_credentials $2 + echo "$pass" + ;; --host-setup-files) #ec33017a "mounts.env" "mounts.sh" "shell-helper.sh" "profile.sh" ssh_host_credentials $2 ssh_copy_files_and_mount $3 $4 $5 @@ -654,6 +696,9 @@ case "$1" in --targets) get_host_ids ;; +--targets-production) + get_host_ids_production + ;; --config) write_test_config $2 $3 $4 $5 ;; diff --git a/tests/conftest.py b/tests/conftest.py index 9a9cc9ad6..bfdb3fb4b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,21 +21,39 @@ def pytest_addoption(parser): - """ Add CLI options and modify optons for pytest-ansible where needed. """ + """ + Add CLI options and modify options for pytest-ansible where needed. + Note: Set the default to to None, otherwise when evaluating with `request.config.getoption("--zinventory"):` + will always return true because a default will be returned. + """ parser.addoption( "--zinventory", "-Z", action="store", - default="test_config.yml", + default=None, help="Absolute path to YAML file containing inventory info for functional testing.", ) + parser.addoption( + "--zinventory-raw", + "-R", + action="store", + default=None, + help="Str - dictionary with values {'host': 'ibm.com', 'user': 'root', 'zoau': '/usr/lpp/zoau', 'pyz': '/usr/lpp/IBM/pyz'}", + ) @pytest.fixture(scope="session") def z_python_interpreter(request): """ Generate temporary shell wrapper for python interpreter. 
""" - path = request.config.getoption("--zinventory") - helper = ZTestHelper.from_yaml_file(path) + src = None + helper = None + if request.config.getoption("--zinventory"): + src = request.config.getoption("--zinventory") + helper = ZTestHelper.from_yaml_file(src) + elif request.config.getoption("--zinventory-raw"): + src = request.config.getoption("--zinventory-raw") + helper = ZTestHelper.from_args(src) + interpreter_str = helper.build_interpreter_string() inventory = helper.get_inventory_info() python_path = helper.get_python_path() @@ -90,8 +108,17 @@ def ansible_zos_module(request, z_python_interpreter): def volumes_on_systems(ansible_zos_module, request): """ Call the pytest-ansible plugin to check volumes on the system and work properly a list by session.""" path = request.config.getoption("--zinventory") - list_Volumes = get_volumes(ansible_zos_module, path) - yield list_Volumes + list_volumes = None + + # If path is None, check if zinventory-raw is used instead and if so, extract the + # volumes dictionary and pass it along. + if path is None: + src = request.config.getoption("--zinventory-raw") + helper = ZTestHelper.from_args(src) + list_volumes = helper.get_volumes_list() + else: + list_volumes = get_volumes(ansible_zos_module, path) + yield list_volumes @pytest.fixture(scope="session") @@ -100,8 +127,18 @@ def volumes_with_vvds(ansible_zos_module, request): then it will try to create one for each volume found and return volumes only if a VVDS was successfully created for it.""" path = request.config.getoption("--zinventory") - volumes = get_volumes(ansible_zos_module, path) - volumes_with_vvds = get_volumes_with_vvds(ansible_zos_module, volumes) + list_volumes = None + + # If path is None, check if zinventory-raw is used instead and if so, extract the + # volumes dictionary and pass it along. + if path is None: + src = request.config.getoption("--zinventory-raw") + helper = ZTestHelper.from_args(src) + list_volumes = helper.get_volumes_list() + else: + list_volumes = get_volumes(ansible_zos_module, path) + + volumes_with_vvds = get_volumes_with_vvds(ansible_zos_module, list_volumes) yield volumes_with_vvds diff --git a/tests/functional/modules/test_module_security.py b/tests/functional/modules/test_module_security.py index 744d8f595..4c3af3c15 100644 --- a/tests/functional/modules/test_module_security.py +++ b/tests/functional/modules/test_module_security.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) IBM Corporation 2020 +# Copyright (c) IBM Corporation 2020, 2024 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/tests/functional/modules/test_zos_apf_func.py b/tests/functional/modules/test_zos_apf_func.py index 918a4d36c..8fe0f4455 100644 --- a/tests/functional/modules/test_zos_apf_func.py +++ b/tests/functional/modules/test_zos_apf_func.py @@ -267,11 +267,6 @@ def test_add_del_volume_persist(ansible_zos_module, volumes_with_vvds): clean_test_env(hosts, test_info) -# keyword: ENABLE-FOR-1-3 -# Test commented because there is a failure in ZOAU 1.2.x, that should be fixed in 1.3.x, so -# whoever works in issue https://github.com/ansible-collections/ibm_zos_core/issues/726 -# should uncomment this test as part of the validation process. 
- def test_batch_add_del(ansible_zos_module, volumes_with_vvds): try: hosts = ansible_zos_module diff --git a/tests/functional/modules/test_zos_backup_restore.py b/tests/functional/modules/test_zos_backup_restore.py index aa25110f8..1b01bebc7 100644 --- a/tests/functional/modules/test_zos_backup_restore.py +++ b/tests/functional/modules/test_zos_backup_restore.py @@ -852,5 +852,5 @@ def test_backup_into_gds(ansible_zos_module, dstype): assert result.get("changed") is True assert result.get("module_stderr") is None finally: - hosts.all.shell(cmd=f"drm ANSIBLE.* ") + hosts.all.shell(cmd=f"drm ANSIBLE.* ; drm OMVSADM.*") diff --git a/tests/functional/modules/test_zos_blockinfile_func.py b/tests/functional/modules/test_zos_blockinfile_func.py index 2f9e6d3c2..84d0850da 100644 --- a/tests/functional/modules/test_zos_blockinfile_func.py +++ b/tests/functional/modules/test_zos_blockinfile_func.py @@ -1539,17 +1539,39 @@ def test_uss_encoding(ansible_zos_module, encoding): results = hosts.all.zos_blockinfile(**params) for result in results.contacted.values(): assert result.get("changed") == 1 - results = hosts.all.shell(cmd="cat {0}".format(params["path"])) + results = hosts.all.shell(cmd="cat \"//'{0}'\" ".format(params["src"])) + for result in results.contacted.values(): + assert result.get("stdout") == "# BEGIN ANSIBLE MANAGED BLOCK\nZOAU_ROOT=/mvsutil-develop_dsed\nZOAU_HOME=$ZOAU_ROOT\nZOAU_DIR=$ZOAU_ROOT\n# END ANSIBLE MANAGED BLOCK" + + params["src"] = ds_name + "(-1)" + results = hosts.all.zos_blockinfile(**params) + for result in results.contacted.values(): + assert result.get("changed") == 1 + results = hosts.all.shell(cmd="cat \"//'{0}'\" ".format(params["src"])) + for result in results.contacted.values(): + assert result.get("stdout") == "# BEGIN ANSIBLE MANAGED BLOCK\nZOAU_ROOT=/mvsutil-develop_dsed\nZOAU_HOME=$ZOAU_ROOT\nZOAU_DIR=$ZOAU_ROOT\n# END ANSIBLE MANAGED BLOCK" + + params_w_bck = dict(insertafter="eof", block="export ZOAU_ROOT\nexport ZOAU_HOME\nexport ZOAU_DIR", state="present", backup=True, backup_name=ds_name + "(+1)") + params_w_bck["src"] = ds_name + "(-1)" + results = hosts.all.zos_blockinfile(**params_w_bck) + for result in results.contacted.values(): + assert result.get("changed") == 1 + assert result.get("rc") == 0 + backup = ds_name + "(0)" + results = hosts.all.shell(cmd="cat \"//'{0}'\" ".format(backup)) + for result in results.contacted.values(): + assert result.get("stdout") == "# BEGIN ANSIBLE MANAGED BLOCK\nZOAU_ROOT=/mvsutil-develop_dsed\nZOAU_HOME=$ZOAU_ROOT\nZOAU_DIR=$ZOAU_ROOT\n# END ANSIBLE MANAGED BLOCK" + + params["src"] = ds_name + "(-3)" + results = hosts.all.zos_blockinfile(**params) for result in results.contacted.values(): - assert result.get("stdout") == EXPECTED_ENCODING + assert result.get("changed") == 0 finally: - remove_uss_environment(ansible_zos_module) + hosts.all.shell(cmd="""drm "ANSIBLE.*" """) @pytest.mark.ds -@pytest.mark.parametrize("dstype", DS_TYPE) -@pytest.mark.parametrize("encoding", ["IBM-1047"]) -def test_ds_encoding(ansible_zos_module, encoding, dstype): +def test_special_characters_ds_insert_block(ansible_zos_module): hosts = ansible_zos_module ds_type = dstype insert_data = "Insert this string" @@ -1592,9 +1614,21 @@ def test_ds_encoding(ansible_zos_module, encoding, dstype): ) results = hosts.all.shell(cmd="cat \"//'{0}'\" ".format(params["path"])) for result in results.contacted.values(): - assert result.get("stdout") == EXPECTED_ENCODING + assert result.get("stdout") == "# BEGIN ANSIBLE MANAGED 
BLOCK\nZOAU_ROOT=/mvsutil-develop_dsed\nZOAU_HOME=$ZOAU_ROOT\nZOAU_DIR=$ZOAU_ROOT\n# END ANSIBLE MANAGED BLOCK" + + params_w_bck = dict(insertafter="eof", block="export ZOAU_ROOT\nexport ZOAU_HOME\nexport ZOAU_DIR", state="present", backup=True, backup_name=backup) + params_w_bck["src"] = ds_name + results = hosts.all.zos_blockinfile(**params_w_bck) + for result in results.contacted.values(): + assert result.get("changed") == 1 + assert result.get("rc") == 0 + backup = backup.replace('$', "\$") + results = hosts.all.shell(cmd="cat \"//'{0}'\" ".format(backup)) + for result in results.contacted.values(): + assert result.get("stdout") == "# BEGIN ANSIBLE MANAGED BLOCK\nZOAU_ROOT=/mvsutil-develop_dsed\nZOAU_HOME=$ZOAU_ROOT\nZOAU_DIR=$ZOAU_ROOT\n# END ANSIBLE MANAGED BLOCK" + finally: - remove_ds_environment(ansible_zos_module, ds_name) + hosts.all.shell(cmd="""drm "ANSIBLE.*" """) ######################### diff --git a/tests/functional/modules/test_zos_copy_func.py b/tests/functional/modules/test_zos_copy_func.py index e8e37375c..76c75dd32 100644 --- a/tests/functional/modules/test_zos_copy_func.py +++ b/tests/functional/modules/test_zos_copy_func.py @@ -2348,6 +2348,18 @@ def test_copy_ps_to_existing_uss_file(ansible_zos_module, force): src_ds = TEST_PS dest = "/tmp/ddchkpt" + hosts = ansible_zos_module + mlq_size = 3 + cobol_src_pds = get_tmp_ds_name(mlq_size) + cobol_src_mem = "HELLOCBL" + cobol_src_mem2 = "HICBL2" + src_lib = get_tmp_ds_name(mlq_size) + dest_lib = get_tmp_ds_name(mlq_size) + dest_lib_aliases = get_tmp_ds_name(mlq_size) + pgm_mem = "HELLO" + pgm2_mem = "HELLO2" + pgm_mem_alias = "ALIAS1" + pgm2_mem_alias = "ALIAS2" try: hosts.all.file(path=dest, state="touch") @@ -2372,6 +2384,23 @@ def test_copy_ps_to_existing_uss_file(ansible_zos_module, force): finally: hosts.all.file(path=dest, state="absent") + else: + # copy src loadlib to dest library pds w/o aliases + copy_res = hosts.all.zos_copy( + src="{0}".format(src_lib), + dest="{0}".format(dest_lib), + remote_src=True, + executable=True, + aliases=False + ) + # copy src loadlib to dest library pds w aliases + copy_res_aliases = hosts.all.zos_copy( + src="{0}".format(src_lib), + dest="{0}".format(dest_lib_aliases), + remote_src=True, + executable=True, + aliases=True + ) @pytest.mark.uss @pytest.mark.seq @@ -2414,6 +2443,69 @@ def test_copy_ps_to_non_existing_ps(ansible_zos_module): cmd="cat \"//'{0}'\"".format(dest), executable=SHELL_EXECUTABLE ) + # Copying the remote loadlibs in USS to a local dir. + # This section ONLY handles ONE host, so if we ever use multiple hosts to + # test, we will need to update this code. + remote_user = hosts["options"]["user"] + # Removing a trailing comma because the framework saves the hosts list as a + # string instead of a list. + remote_host = hosts["options"]["inventory"].replace(",", "") + + tmp_folder = tempfile.TemporaryDirectory(prefix="tmpfetch") + cmd = [ + "sftp", + "-r", + f"{remote_user}@{remote_host}:{uss_location}", + f"{tmp_folder.name}" + ] + with subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE) as sftp_proc: + result = sftp_proc.stdout.read() + + source_path = os.path.join(tmp_folder.name, os.path.basename(uss_location)) + + if not is_created: + # ensure dest data sets absent for this variation of the test case. + hosts.all.zos_data_set(name=dest_lib, state="absent") + else: + # allocate dest loadlib to copy over without an alias. 
diff --git a/tests/functional/modules/test_zos_copy_func.py b/tests/functional/modules/test_zos_copy_func.py
index e8e37375c..76c75dd32 100644
--- a/tests/functional/modules/test_zos_copy_func.py
+++ b/tests/functional/modules/test_zos_copy_func.py
@@ -2348,6 +2348,18 @@ def test_copy_ps_to_existing_uss_file(ansible_zos_module, force):
     src_ds = TEST_PS
     dest = "/tmp/ddchkpt"
+    hosts = ansible_zos_module
+    mlq_size = 3
+    cobol_src_pds = get_tmp_ds_name(mlq_size)
+    cobol_src_mem = "HELLOCBL"
+    cobol_src_mem2 = "HICBL2"
+    src_lib = get_tmp_ds_name(mlq_size)
+    dest_lib = get_tmp_ds_name(mlq_size)
+    dest_lib_aliases = get_tmp_ds_name(mlq_size)
+    pgm_mem = "HELLO"
+    pgm2_mem = "HELLO2"
+    pgm_mem_alias = "ALIAS1"
+    pgm2_mem_alias = "ALIAS2"
 
     try:
         hosts.all.file(path=dest, state="touch")
@@ -2372,6 +2384,23 @@ def test_copy_ps_to_existing_uss_file(ansible_zos_module, force):
     finally:
         hosts.all.file(path=dest, state="absent")
 
+    else:
+        # copy src loadlib to dest library pds w/o aliases
+        copy_res = hosts.all.zos_copy(
+            src="{0}".format(src_lib),
+            dest="{0}".format(dest_lib),
+            remote_src=True,
+            executable=True,
+            aliases=False
+        )
+        # copy src loadlib to dest library pds w aliases
+        copy_res_aliases = hosts.all.zos_copy(
+            src="{0}".format(src_lib),
+            dest="{0}".format(dest_lib_aliases),
+            remote_src=True,
+            executable=True,
+            aliases=True
+        )
 
 
 @pytest.mark.uss
 @pytest.mark.seq
@@ -2414,6 +2443,69 @@ def test_copy_ps_to_non_existing_ps(ansible_zos_module):
         cmd="cat \"//'{0}'\"".format(dest),
         executable=SHELL_EXECUTABLE
     )
+    # Copying the remote loadlibs in USS to a local dir.
+    # This section ONLY handles ONE host, so if we ever use multiple hosts to
+    # test, we will need to update this code.
+    remote_user = hosts["options"]["user"]
+    # Removing a trailing comma because the framework saves the hosts list as a
+    # string instead of a list.
+    remote_host = hosts["options"]["inventory"].replace(",", "")
+
+    tmp_folder = tempfile.TemporaryDirectory(prefix="tmpfetch")
+    cmd = [
+        "sftp",
+        "-r",
+        f"{remote_user}@{remote_host}:{uss_location}",
+        f"{tmp_folder.name}"
+    ]
+    with subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE) as sftp_proc:
+        result = sftp_proc.stdout.read()
+
+    source_path = os.path.join(tmp_folder.name, os.path.basename(uss_location))
+
+    if not is_created:
+        # ensure dest data sets absent for this variation of the test case.
+        hosts.all.zos_data_set(name=dest_lib, state="absent")
+    else:
+        # allocate dest loadlib to copy over without an alias.
+        hosts.all.zos_data_set(
+            name=dest_lib,
+            state="present",
+            type="pdse",
+            record_format="u",
+            record_length=0,
+            block_size=32760,
+            space_primary=2,
+            space_type="m",
+            replace=True
+        )
+
+    if not is_created:
+        # dest data set does not exist, specify it in dest_dataset param.
+        # copy src loadlib to dest library pds w/o aliases
+        copy_res = hosts.all.zos_copy(
+            src=source_path,
+            dest="{0}".format(dest_lib),
+            executable=True,
+            aliases=False,
+            dest_data_set={
+                'type': "pdse",
+                'record_format': "u",
+                'record_length': 0,
+                'block_size': 32760,
+                'space_primary': 2,
+                'space_type': "m",
+            }
+        )
+    else:
+        # copy src loadlib to dest library pds w/o aliases
+        copy_res = hosts.all.zos_copy(
+            src=source_path,
+            dest="{0}".format(dest_lib),
+            executable=True,
+            aliases=False
+        )
+
     for result in copy_res.contacted.values():
         assert result.get("msg") is None
         assert result.get("changed") is True
@@ -2480,6 +2572,7 @@ def test_copy_ps_to_non_empty_ps(ansible_zos_module, force):
         assert result.get("rc") == 0
         assert result.get("stdout") != ""
     finally:
+        hosts.all.shell(cmd='rm -r /tmp/c')
         hosts.all.zos_data_set(name=dest, state="absent")
diff --git a/tests/functional/modules/test_zos_job_submit_func.py b/tests/functional/modules/test_zos_job_submit_func.py
index 927dcfaad..ffd920259 100644
--- a/tests/functional/modules/test_zos_job_submit_func.py
+++ b/tests/functional/modules/test_zos_job_submit_func.py
@@ -18,8 +18,11 @@
 import tempfile
 import re
 import os
+import string
+import random
 from shellescape import quote
 import pytest
+from datetime import datetime
 
 from ibm_zos_core.tests.helpers.volumes import Volume_Handler
 from ibm_zos_core.tests.helpers.dataset import get_tmp_ds_name
@@ -398,8 +401,9 @@
 //
 """
 
-TEMP_PATH = "/tmp/jcl"
-DATA_SET_NAME_SPECIAL_CHARS = "imstestl.im@1.x#$xx05"
+def get_unique_uss_file_name():
+    unique_str = "n" + datetime.now().strftime("%H%M%S") + "".join(random.choices(string.ascii_lowercase, k=4)) + ".dzp"
+    return "/tmp/{0}".format(unique_str)
 
 @pytest.mark.parametrize(
     "location", [
@@ -422,9 +426,10 @@ def test_job_submit_pds(ansible_zos_module, location):
         results = None
         hosts = ansible_zos_module
         data_set_name = get_tmp_ds_name()
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        temp_path = get_unique_uss_file_name()
+        hosts.all.file(path=temp_path, state="directory")
         hosts.all.shell(
-            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH)
+            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), temp_path)
         )
 
         hosts.all.zos_data_set(
@@ -432,7 +437,7 @@ def test_job_submit_pds(ansible_zos_module, location):
         )
 
         hosts.all.shell(
-            cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, data_set_name)
+            cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(temp_path, data_set_name)
         )
         if bool(location.get("default_location")):
             results = hosts.all.zos_job_submit(
@@ -448,30 +453,32 @@ def test_job_submit_pds(ansible_zos_module, location):
             assert result.get("jobs")[0].get("ret_code").get("code") == 0
             assert result.get("changed") is True
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
         hosts.all.zos_data_set(name=data_set_name, state="absent")
 
 
 def test_job_submit_pds_special_characters(ansible_zos_module):
     try:
         hosts = ansible_zos_module
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        temp_path = get_unique_uss_file_name()
+        data_set_name_special_chars = get_tmp_ds_name(symbols=True)
+        hosts.all.file(path=temp_path, state="directory")
         hosts.all.shell(
-            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH)
+            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), temp_path)
         )
         results = hosts.all.zos_data_set(
-            name=DATA_SET_NAME_SPECIAL_CHARS, state="present", type="pds", replace=True
+            name=data_set_name_special_chars, state="present", type="pds", replace=True
         )
         hosts.all.shell(
-            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH)
+            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), temp_path)
         )
         hosts.all.shell(
             cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(
-                TEMP_PATH, DATA_SET_NAME_SPECIAL_CHARS.replace('$', '\\$')
+                temp_path, data_set_name_special_chars.replace('$', '\\$')
             )
         )
         results = hosts.all.zos_job_submit(
-            src="{0}(SAMPLE)".format(DATA_SET_NAME_SPECIAL_CHARS),
+            src="{0}(SAMPLE)".format(data_set_name_special_chars),
             location="data_set",
         )
         for result in results.contacted.values():
@@ -479,26 +486,27 @@ def test_job_submit_pds_special_characters(ansible_zos_module):
             assert result.get("jobs")[0].get("ret_code").get("code") == 0
             assert result.get("changed") is True
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
-        hosts.all.zos_data_set(name=DATA_SET_NAME_SPECIAL_CHARS, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
+        hosts.all.zos_data_set(name=data_set_name_special_chars, state="absent")
 
 
 def test_job_submit_uss(ansible_zos_module):
     try:
         hosts = ansible_zos_module
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        temp_path = get_unique_uss_file_name()
+        hosts.all.file(path=temp_path, state="directory")
         hosts.all.shell(
-            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH)
+            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), temp_path)
         )
         results = hosts.all.zos_job_submit(
-            src=f"{TEMP_PATH}/SAMPLE", location="uss", volume=None
+            src=f"{temp_path}/SAMPLE", location="uss", volume=None
         )
         for result in results.contacted.values():
             assert result.get("jobs")[0].get("ret_code").get("msg_code") == "0000"
             assert result.get("jobs")[0].get("ret_code").get("code") == 0
             assert result.get("changed") is True
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
 
 
 def test_job_submit_local(ansible_zos_module):
@@ -544,12 +552,13 @@ def test_job_submit_pds_volume(ansible_zos_module, volumes_on_systems):
     try:
         hosts = ansible_zos_module
         data_set_name = get_tmp_ds_name()
+        temp_path = get_unique_uss_file_name()
         volumes = Volume_Handler(volumes_on_systems)
         volume_1 = volumes.get_available_vol()
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        hosts.all.file(path=temp_path, state="directory")
 
         hosts.all.shell(
-            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH)
+            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), temp_path)
         )
 
         hosts.all.zos_data_set(
@@ -557,7 +566,7 @@ def test_job_submit_pds_volume(ansible_zos_module, volumes_on_systems):
         )
 
         hosts.all.shell(
-            cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(TEMP_PATH, data_set_name)
+            cmd="cp {0}/SAMPLE \"//'{1}(SAMPLE)'\"".format(temp_path, data_set_name)
         )
 
         hosts.all.zos_data_set(
@@ -574,7 +583,7 @@ def test_job_submit_pds_volume(ansible_zos_module, volumes_on_systems):
             assert result.get("jobs")[0].get("ret_code").get("code") == 0
             assert result.get('changed') is True
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
         hosts.all.zos_data_set(name=data_set_name, state="absent")
 
 
@@ -582,11 +591,12 @@ def test_job_submit_pds_5_sec_job_wait_15(ansible_zos_module):
     try:
         hosts = ansible_zos_module
         data_set_name = get_tmp_ds_name()
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        temp_path = get_unique_uss_file_name()
+        hosts.all.file(path=temp_path, state="directory")
         wait_time_s = 15
 
         hosts.all.shell(
-            cmd=f"echo {quote(JCL_FILE_CONTENTS_05_SEC)} > {TEMP_PATH}/BPXSLEEP"
+            cmd=f"echo {quote(JCL_FILE_CONTENTS_05_SEC)} > {temp_path}/BPXSLEEP"
         )
 
         hosts.all.zos_data_set(
@@ -594,7 +604,7 @@ def test_job_submit_pds_5_sec_job_wait_15(ansible_zos_module):
         )
 
         hosts.all.shell(
-            cmd=f"cp {TEMP_PATH}/BPXSLEEP \"//'{data_set_name}(BPXSLEEP)'\""
+            cmd=f"cp {temp_path}/BPXSLEEP \"//'{data_set_name}(BPXSLEEP)'\""
         )
 
         hosts = ansible_zos_module
@@ -607,7 +617,7 @@ def test_job_submit_pds_5_sec_job_wait_15(ansible_zos_module):
             assert result.get('changed') is True
             assert result.get('duration') <= wait_time_s
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
         hosts.all.zos_data_set(name=data_set_name, state="absent")
 
 
@@ -615,11 +625,12 @@ def test_job_submit_pds_30_sec_job_wait_60(ansible_zos_module):
     try:
         hosts = ansible_zos_module
        data_set_name = get_tmp_ds_name()
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        temp_path = get_unique_uss_file_name()
+        hosts.all.file(path=temp_path, state="directory")
         wait_time_s = 60
 
         hosts.all.shell(
-            cmd=f"echo {quote(JCL_FILE_CONTENTS_30_SEC)} > {TEMP_PATH}/BPXSLEEP"
+            cmd=f"echo {quote(JCL_FILE_CONTENTS_30_SEC)} > {temp_path}/BPXSLEEP"
         )
 
         hosts.all.zos_data_set(
@@ -627,7 +638,7 @@ def test_job_submit_pds_30_sec_job_wait_60(ansible_zos_module):
         )
 
         hosts.all.shell(
-            cmd=f"cp {TEMP_PATH}/BPXSLEEP \"//'{data_set_name}(BPXSLEEP)'\""
+            cmd=f"cp {temp_path}/BPXSLEEP \"//'{data_set_name}(BPXSLEEP)'\""
        )
 
         hosts = ansible_zos_module
@@ -640,7 +651,7 @@ def test_job_submit_pds_30_sec_job_wait_60(ansible_zos_module):
             assert result.get('changed') is True
             assert result.get('duration') <= wait_time_s
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
         hosts.all.zos_data_set(name=data_set_name, state="absent")
 
 def test_job_submit_pds_30_sec_job_wait_10_negative(ansible_zos_module):
@@ -648,11 +659,12 @@
     try:
         hosts = ansible_zos_module
         data_set_name = get_tmp_ds_name()
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        temp_path = get_unique_uss_file_name()
+        hosts.all.file(path=temp_path, state="directory")
         wait_time_s = 10
 
         hosts.all.shell(
-            cmd=f"echo {quote(JCL_FILE_CONTENTS_30_SEC)} > {TEMP_PATH}/BPXSLEEP"
+            cmd=f"echo {quote(JCL_FILE_CONTENTS_30_SEC)} > {temp_path}/BPXSLEEP"
         )
 
         hosts.all.zos_data_set(
@@ -660,7 +672,7 @@
         )
 
         hosts.all.shell(
-            cmd=f"cp {TEMP_PATH}/BPXSLEEP \"//'{data_set_name}(BPXSLEEP)'\""
+            cmd=f"cp {temp_path}/BPXSLEEP \"//'{data_set_name}(BPXSLEEP)'\""
        )
 
         hosts = ansible_zos_module
@@ -674,7 +686,7 @@
             # expecting at least "long running job that exceeded its maximum wait"
             assert re.search(r'exceeded', repr(result.get("msg")))
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
         hosts.all.zos_data_set(name=data_set_name, state="absent")
 
 
@@ -823,12 +835,13 @@ def test_job_submit_jinja_template(ansible_zos_module, args):
 def test_job_submit_full_input(ansible_zos_module):
     try:
         hosts = ansible_zos_module
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        temp_path = get_unique_uss_file_name()
+        hosts.all.file(path=temp_path, state="directory")
         hosts.all.shell(
-            cmd=f"echo {quote(JCL_FULL_INPUT)} > {TEMP_PATH}/SAMPLE"
+            cmd=f"echo {quote(JCL_FULL_INPUT)} > {temp_path}/SAMPLE"
         )
         results = hosts.all.zos_job_submit(
-            src=f"{TEMP_PATH}/SAMPLE",
+            src=f"{temp_path}/SAMPLE",
             location="uss",
             volume=None,
             # This job used to set wait=True, but since it has been deprecated
@@ -840,7 +853,7 @@ def test_job_submit_full_input(ansible_zos_module):
             assert result.get("jobs")[0].get("ret_code").get("code") == 0
             assert result.get("changed") is True
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
 
 def test_negative_job_submit_local_jcl_no_dsn(ansible_zos_module):
     tmp_file = tempfile.NamedTemporaryFile(delete=True)
@@ -975,18 +988,19 @@ def test_job_from_gdg_source(ansible_zos_module, generation):
     try:
         # Creating a GDG for the test.
         source = get_tmp_ds_name()
+        temp_path = get_unique_uss_file_name()
         gds_name = f"{source}({generation})"
         hosts.all.zos_data_set(name=source, state="present", type="gdg", limit=3)
         hosts.all.zos_data_set(name=f"{source}(+1)", state="present", type="seq")
         hosts.all.zos_data_set(name=f"{source}(+1)", state="present", type="seq")
 
         # Copying the JCL to the GDS.
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        hosts.all.file(path=temp_path, state="directory")
         hosts.all.shell(
-            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), TEMP_PATH)
+            cmd="echo {0} > {1}/SAMPLE".format(quote(JCL_FILE_CONTENTS), temp_path)
         )
         hosts.all.shell(
-            cmd="dcp '{0}/SAMPLE' '{1}'".format(TEMP_PATH, gds_name)
+            cmd="dcp '{0}/SAMPLE' '{1}'".format(temp_path, gds_name)
         )
 
         results = hosts.all.zos_job_submit(src=gds_name, location="data_set")
@@ -995,7 +1009,7 @@ def test_job_from_gdg_source(ansible_zos_module, generation):
             assert result.get("jobs")[0].get("ret_code").get("code") == 0
             assert result.get("changed") is True
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
         hosts.all.zos_data_set(name=f"{source}(0)", state="absent")
         hosts.all.zos_data_set(name=f"{source}(-1)", state="absent")
         hosts.all.zos_data_set(name=source, state="absent")
@@ -1049,16 +1063,17 @@ def test_inexistent_positive_gds(ansible_zos_module):
 def test_zoau_bugfix_invalid_utf8_chars(ansible_zos_module):
     try:
         hosts = ansible_zos_module
+        temp_path = get_unique_uss_file_name()
         # Copy C source and compile it.
-        hosts.all.file(path=TEMP_PATH, state="directory")
+        hosts.all.file(path=temp_path, state="directory")
         hosts.all.shell(
-            cmd=f"echo {quote(C_SRC_INVALID_UTF8)} > {TEMP_PATH}/noprint.c"
+            cmd=f"echo {quote(C_SRC_INVALID_UTF8)} > {temp_path}/noprint.c"
         )
-        hosts.all.shell(cmd=f"xlc -o {TEMP_PATH}/noprint {TEMP_PATH}/noprint.c")
+        hosts.all.shell(cmd=f"xlc -o {temp_path}/noprint {temp_path}/noprint.c")
         # Create local JCL and submit it.
         tmp_file = tempfile.NamedTemporaryFile(delete=True)
         with open(tmp_file.name, "w",encoding="utf-8") as f:
-            f.write(JCL_INVALID_UTF8_CHARS_EXC.format(TEMP_PATH))
+            f.write(JCL_INVALID_UTF8_CHARS_EXC.format(temp_path))
 
         results = hosts.all.zos_job_submit(
             src=tmp_file.name,
@@ -1073,4 +1088,4 @@ def test_zoau_bugfix_invalid_utf8_chars(ansible_zos_module):
             assert result.get("jobs")[0].get("ret_code").get("code") == 0
             assert result.get("changed") is True
     finally:
-        hosts.all.file(path=TEMP_PATH, state="absent")
+        hosts.all.file(path=temp_path, state="absent")
diff --git a/tests/functional/modules/test_zos_operator_func.py b/tests/functional/modules/test_zos_operator_func.py
index eb1bf1f60..123537d8b 100644
--- a/tests/functional/modules/test_zos_operator_func.py
+++ b/tests/functional/modules/test_zos_operator_func.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 
-# Copyright (c) IBM Corporation 2019, 2023
+# Copyright (c) IBM Corporation 2019, 2024
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -15,12 +15,11 @@
 
 __metaclass__ = type
 
-import pytest
-import yaml
 import os
+import yaml
 from shellescape import quote
 
-from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import (
+from ibm_zos_core.plugins.module_utils import (
     zoau_version_checker,
 )
 
@@ -97,10 +96,10 @@ def test_zos_operator_invalid_command_to_ensure_transparency(ansible_zos_module)
     results = hosts.all.zos_operator(cmd="DUMP COMM=('ERROR DUMP')", verbose=False)
     for result in results.contacted.values():
         assert result.get("changed") is True
-        transparency = False
-        if any('DUMP COMMAND' in str for str in result.get("content")):
-            transparency = True
-        assert transparency
+    transparency = False
+    if any('DUMP COMMAND' in line for line in result.get("content")):
+        transparency = True
+    assert transparency
 
 
 def test_zos_operator_positive_path(ansible_zos_module):
@@ -120,6 +119,7 @@ def test_zos_operator_positive_path_verbose(ansible_zos_module):
         assert result.get("changed") is True
         assert result.get("content") is not None
     # Traverse the content list for a known verbose keyword and track state
+    is_verbose = False
     if any('BGYSC0804I' in str for str in result.get("content")):
         is_verbose = True
     assert is_verbose
@@ -171,6 +171,24 @@ def test_zos_operator_positive_verbose_blocking(ansible_zos_module):
         assert result.get('elapsed') >= wait_time_s
 
 
+def test_zos_operator_positive_path_preserve_case(ansible_zos_module):
+    hosts = ansible_zos_module
+    command = "D U,all"
+    results = hosts.all.zos_operator(
+        cmd=command,
+        verbose=False,
+        case_sensitive=True
+    )
+
+    for result in results.contacted.values():
+        assert result["rc"] == 0
+        assert result.get("changed") is True
+        assert result.get("content") is not None
+        # Making sure the output from opercmd logged the command
+        # exactly as it was written.
+        assert len(result.get("content")) > 1
+        assert command in result.get("content")[1]
+
 
 def test_response_come_back_complete(ansible_zos_module):
     hosts = ansible_zos_module
@@ -185,31 +203,33 @@ def test_response_come_back_complete(ansible_zos_module):
 
 
 def test_zos_operator_parallel_terminal(get_config):
-    path = get_config
-    with open(path, 'r') as file:
-        enviroment = yaml.safe_load(file)
-    ssh_key = enviroment["ssh_key"]
-    hosts = enviroment["host"].upper()
-    user = enviroment["user"].upper()
-    python_path = enviroment["python_path"]
-    cut_python_path = python_path[:python_path.find('/bin')].strip()
-    zoau = enviroment["environment"]["ZOAU_ROOT"]
-    try:
-        playbook = "playbook.yml"
-        inventory = "inventory.yml"
-        os.system("echo {0} > {1}".format(quote(PARALLEL_RUNNING.format(
-            zoau,
-            cut_python_path,
-        )), playbook))
-        os.system("echo {0} > {1}".format(quote(INVENTORY.format(
-            hosts,
-            ssh_key,
-            user,
-        )), inventory))
-        command = "(ansible-playbook -i {0} {1}) & (ansible-playbook -i {0} {1})".format(inventory, playbook)
-        stdout = os.system(command)
-        assert stdout == 0
-    finally:
-        os.remove("inventory.yml")
-        os.remove("playbook.yml")
-
+    path = get_config
+    with open(path, 'r') as file:
+        environment = yaml.safe_load(file)
+    ssh_key = environment["ssh_key"]
+    hosts = environment["host"].upper()
+    user = environment["user"].upper()
+    python_path = environment["python_path"]
+    cut_python_path = python_path[:python_path.find('/bin')].strip()
+    zoau = environment["environment"]["ZOAU_ROOT"]
+    try:
+        playbook = "playbook.yml"
+        inventory = "inventory.yml"
+        os.system("echo {0} > {1}".format(quote(PARALLEL_RUNNING.format(
+            zoau,
+            cut_python_path,
+        )), playbook))
+        os.system("echo {0} > {1}".format(quote(INVENTORY.format(
+            hosts,
+            ssh_key,
+            user,
+        )), inventory))
+        command = "(ansible-playbook -i {0} {1}) & (ansible-playbook -i {0} {1})".format(
+            inventory,
+            playbook
+        )
+        rc = os.system(command)
+        assert rc == 0
+    finally:
+        os.remove("inventory.yml")
+        os.remove("playbook.yml")
diff --git a/tests/helpers/ztest.py b/tests/helpers/ztest.py
index af198d6f0..e471dfb26 100644
--- a/tests/helpers/ztest.py
+++ b/tests/helpers/ztest.py
@@ -13,6 +13,7 @@
 
 __metaclass__ = type
 
+import json
 import os
 import stat
 import uuid
@@ -47,6 +48,96 @@ def from_yaml_file(cls, path):
             testvars = safe_load(varfile)
         return cls(**testvars)
 
+    @classmethod
+    def from_args(cls, src):
+        """
+        ZTestHelper provides helper methods to deal with added complexities when testing against a z/OS system.
+        Similar to method `from_yaml_file(path)`, this method takes a dictionary of required keywords instead
+        of reading them from a file, to improve performance.
+
+        Args:
+            src - (dictionary) with keywords {'host': 'required', 'user': 'required', 'zoau': 'required', 'pyz': 'required', 'pythonpath': 'required', 'extra_args': 'optional'}
+                host - z/OS managed node
+                user - user/omvs segment authorized to run ansible playbooks
+                zoau - home directory where z Open Automation Utilities is installed
+                pyz - python home
+                pythonpath - environment variable that is used to specify the location of Python libraries, eg ZOAU python modules
+                extra_args - dictionary used to include properties such as 'volumes' or other dynamic content
+
+        Code Example:
+            if request.config.getoption("--zinventory-raw"):
+                src = request.config.getoption("--zinventory-raw")
+                helper = ZTestHelper.from_args(src)
+                interpreter_str = helper.build_interpreter_string()
+                inventory = helper.get_inventory_info()
+                python_path = helper.get_python_path()
+        Shell example with pytest:
+            pytest tests/functional/modules/test_zos_mount_func.py::test_basic_mount --host-pattern=all -s -v --zinventory-raw='{"host": "zvm.ibm.com", "user": "ibmuser", "zoau": "/zoau/v1.3.1", "pyz": "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz", "pythonpath": "/zoau/v1.3.1/lib/3.10", "extra_args":{"volumes":["222222","000000"],"other":"something else"}}' -s
+
+            {
+              "host": "zvm.ibm.com",
+              "user": "ibmuser",
+              "zoau": "/zoau/v1.3.1",
+              "pyz": "/allpython/3.10/usr/lpp/IBM/cyp/v3r10/pyz",
+              "pythonpath": "/zoau/v1.3.1/lib/3.10",
+              "extra_args": {
+                "volumes": [
+                  "vol1",
+                  "vol2"
+                ],
+                "other": "something else"
+              }
+            }
+        """
+        # TODO: add support for a positional string, eg "host,user,zoau,pyz", converted as needed.
+
+        host, user, zoau, pyz, pythonpath, extra_args, extra = None, None, None, None, None, None, None
+
+        src = json.loads(src)
+        # Traverse src and pick up the supported keywords; unknown keys are ignored.
+        for key, value in src.items():
+            if key == "host":
+                host = value
+            elif key == "user":
+                user = value
+            elif key == "zoau":
+                zoau = value
+            elif key == "pyz":
+                pyz = value
+            elif key == "pythonpath":
+                pythonpath = value
+            elif key == "extra_args":
+                extra = value
+
+        for keyword, prop in {"host": host, "user": user, "zoau": zoau, "pyz": pyz, "pythonpath": pythonpath}.items():
+            if prop is None:
+                raise ValueError(f"Invalid value for keyword [{keyword}], the value must not be None.")
+
+        environment_vars = dict()
+        environment_vars.update({'_BPXK_AUTOCVT': 'ON'})
+        environment_vars.update({'_CEE_RUNOPTS': '\'FILETAG(AUTOCVT,AUTOTAG) POSIX(ON)\''})
+        environment_vars.update({'_TAG_REDIR_IN': 'txt'})
+        environment_vars.update({'_TAG_REDIR_OUT': 'txt'})
+        environment_vars.update({'LANG': 'C'})
+        environment_vars.update({'ZOAU_HOME': zoau})
+        environment_vars.update({'LIBPATH': f"{zoau}/lib:{pyz}/lib:/lib:/usr/lib:."})
+        environment_vars.update({'PYTHONPATH': f"{pythonpath}"})  # type: ignore
+        environment_vars.update({'PATH': f"{zoau}/bin:{pyz}/bin:/bin:/usr/sbin:/var/bin"})
+        environment_vars.update({'PYTHONSTDINENCODING': 'cp1047'})
+
+        testvars = dict()
+        testvars.update({'host': host})
+        testvars.update({'user': user})
+        testvars.update({'python_path': f"{pyz}/bin/python3"})
+        testvars.update({'environment': environment_vars})
+
+        if extra:
+            extra_args = dict()
+            extra_args.update(extra)
+            testvars.update(extra_args)
+
+        return cls(**testvars)
+
     def get_inventory_info(self):
         """ Returns dictionary containing basic info needed to generate a single-host inventory file. """
         inventory_info = {
@@ -68,3 +159,18 @@ def build_interpreter_string(self):
     def get_python_path(self):
         """ Returns python path """
         return self._python_path
+
+    def get_extra_args(self, key=None):
+        """ Return the extra args dictionary, or a single entry when a key is given (matched case-insensitively). """
+        if key is None:
+            return self._extra_args
+        return self._extra_args.get(key) or self._extra_args.get(key.lower())
+
+    def get_volumes_list(self) -> list[str]:
+        """ Get volumes as a list if it's been defined in extra args. """
+        for key, value in self._extra_args.items():
+            if key.lower() == "volumes":
+                if not isinstance(value, list):
+                    message = f"Invalid value for use with property [{key}], value must be type list[]."
+                    raise ValueError(message)
+                return value
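Taken together, the helper changes let a suite run from a single JSON string instead of a YAML inventory file. A sketch of how a conftest.py might wire this up; the fixture names and option registration are hypothetical, only the ZTestHelper methods come from this patch, and the import path is assumed to match the ibm_zos_core.tests.helpers imports used elsewhere in these tests:

    # conftest.py (sketch, not part of this patch)
    import pytest
    from ibm_zos_core.tests.helpers.ztest import ZTestHelper  # assumed module path

    @pytest.fixture(scope="session")
    def z_helper(request):
        # --zinventory-raw carries the JSON string shown in the from_args docstring.
        raw = request.config.getoption("--zinventory-raw")
        return ZTestHelper.from_args(raw)

    @pytest.fixture(scope="session")
    def volumes_on_systems(z_helper):
        # get_volumes_list() returns None when 'volumes' was not passed in extra_args.
        return z_helper.get_volumes_list() or []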