diff --git a/.gitignore b/.gitignore
index 8c0a25c1ce..8a7bd72dce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ docs/plugins/test-checks.rst
 docs/_build
 docs/spec
 docs/stories
+docs/_static/tmt-small.png
 
 # Python
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d28627af6e..8389b21883 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -36,7 +36,7 @@ repos:
           - "docutils>=0.16"  # 0.16 is the current one available for RHEL9
           - "fmf>=1.3.0"
           - "jinja2>=2.11.3"  # 3.1.2 / 3.1.2
-          - "pint>=0.16.1,<0.20"  # 0.16.1 / 0.19.x TODO: Pint 0.20 requires larger changes to tmt.hardware
+          - "pint>=0.16.1"  # 0.16.1
           - "pygments>=2.7.4"  # 2.7.4 is the current one available for RHEL9
           - "requests>=2.25.1"  # 2.28.2 / 2.31.0
           - "ruamel.yaml>=0.16.6"  # 0.17.32 / 0.17.32
@@ -81,7 +81,7 @@ repos:
           - "docutils>=0.16"  # 0.16 is the current one available for RHEL9
           - "fmf>=1.3.0"
           - "jinja2>=2.11.3"  # 3.1.2 / 3.1.2
-          - "pint>=0.16.1,<0.20"  # 0.16.1 / 0.19.x TODO: Pint 0.20 requires larger changes to tmt.hardware
+          - "pint>=0.16.1"  # 0.16.1
           - "pygments>=2.7.4"  # 2.7.4 is the current one available for RHEL9
           - "requests>=2.25.1"  # 2.28.2 / 2.31.0
           - "ruamel.yaml>=0.16.6"  # 0.17.32 / 0.17.32
@@ -161,3 +161,22 @@ repos:
       - id: codespell
         additional_dependencies:
           - tomli  # Required for python < 3.11
+
+  - repo: https://github.com/djlint/djLint
+    rev: v1.34.1
+    hooks:
+      - id: djlint
+        files: "\\.j2"
+        types_or: ['jinja']
+
+  - repo: https://github.com/aristanetworks/j2lint.git
+    rev: v1.1.0
+    hooks:
+      - id: j2lint
+        args:
+          # j2lint does not consume pyproject.toml
+          - "--ignore"
+          - jinja-statements-indentation
+          - jinja-variable-lower-case
+          - single-statement-per-line
+          - "--"
diff --git a/containers/fedora/coreos/Containerfile b/containers/fedora/coreos/Containerfile
index 4e256edb3e..d8c7187369 100644
--- a/containers/fedora/coreos/Containerfile
+++ b/containers/fedora/coreos/Containerfile
@@ -14,4 +14,14 @@ RUN rpm-ostree install dnf5 \
     # Remove diffutils as its used in many package manager tests, and tests
     # are simpler if all environments lack the same package, we don't have
     # to parametrize them even more.
-    && dnf5 remove -y diffutils
+    # Do *NOT* use dnf5 to remove this package - it might create conflicts
+    # in /var/lib/dnf if the next command called is `debuginfo-install`
+    # or any other dnf4-ish command.
+    && rpm-ostree uninstall diffutils \
+    # When removing diffutils, these need to be removed too.
+       containers-common-extra \
+       passt \
+       passt-selinux \
+       podman \
+       policycoreutils \
+       toolbox
diff --git a/containers/fedora/coreos/ostree/Containerfile b/containers/fedora/coreos/ostree/Containerfile
index fd3609b3d2..1387e9f580 100644
--- a/containers/fedora/coreos/ostree/Containerfile
+++ b/containers/fedora/coreos/ostree/Containerfile
@@ -16,6 +16,16 @@ RUN rpm-ostree install dnf5 \
     # Remove diffutils as its used in many package manager tests, and tests
     # are simpler if all environments lack the same package, we don't have
     # to parametrize them even more.
-    && dnf5 remove -y diffutils \
+    # Do *NOT* use dnf5 to remove this package - it might create conflicts
+    # in /var/lib/dnf if the next command called is `debuginfo-install`
+    # or any other dnf4-ish command.
+    && rpm-ostree uninstall diffutils \
+    # When removing diffutils, these need to be removed too.
+ containers-common-extra \ + passt \ + passt-selinux \ + podman \ + policycoreutils \ + toolbox \ # Simulate ostree-booted environment && touch /run/ostree-booted diff --git a/docs/Makefile b/docs/Makefile index 58c91383e5..2f18033e38 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -4,8 +4,11 @@ .DEFAULT_GOAL := help .PHONY: help generate-plugins plugins/*.rst generate-stories generate-template-filters generate-autodocs clean +LOGO_SRC = https://raw.githubusercontent.com/teemtee/docs/main/logo/tmt-small.png +LOGO_DST = _static/tmt-small.png + clean: - rm -rf _build stories spec code/autodocs/*.rst code/template-filters.rst + rm -rf _build stories spec code/autodocs/*.rst code/template-filters.rst $(LOGO_DST) find plugins -name "*.rst" ! -name index.rst | xargs rm -f ## @@ -18,7 +21,12 @@ TEMPLATESDIR = templates PLUGINS_TEMPLATE := $(TEMPLATESDIR)/plugins.rst.j2 -generate: spec stories generate-lint-checks generate-template-filters generate-plugins generate-stories generate-autodocs ## Refresh all generated documentation sources +generate: $(LOGO_DST) spec stories generate-lint-checks generate-template-filters generate-plugins generate-stories generate-autodocs ## Refresh all generated documentation sources + +# We can ignore the error: later, during the build, if the logo is +# missing, Sphinx will complain. +$(LOGO_DST): + -curl -f $(LOGO_SRC) -o $(LOGO_DST) spec: mkdir -p spec diff --git a/docs/codespell.ignore b/docs/codespell.ignore index d4a6693244..fab1ad29a6 100644 --- a/docs/codespell.ignore +++ b/docs/codespell.ignore @@ -1 +1,3 @@ # Override the parent implementation - it would try to call `Tree.storys()`... + passt \ + passt-selinux \ diff --git a/docs/conf.py b/docs/conf.py index a106153062..39e0b65fc0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -16,7 +16,7 @@ import subprocess import sys from pathlib import Path -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: from sphinx.application import Sphinx @@ -222,7 +222,7 @@ def _load_theme( # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -# html_favicon = None +html_favicon = '_static/tmt-small.png' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -313,7 +313,7 @@ def _load_theme( ] -def generate_tmt_docs(app: Sphinx) -> None: +def generate_tmt_docs(app: Sphinx, config: Any) -> None: """ Run `make generate` to populate the auto-generated sources """ conf_dir = Path(app.confdir) @@ -321,4 +321,7 @@ def generate_tmt_docs(app: Sphinx) -> None: def setup(app: Sphinx) -> None: - app.connect("builder-inited", generate_tmt_docs) + # Generate sources after loading configuration. That should build + # everything, including the logo, before Sphinx starts checking + # whether all input files exist. + app.connect("config-inited", generate_tmt_docs) diff --git a/docs/templates/lint-checks.rst.j2 b/docs/templates/lint-checks.rst.j2 index 52c5d614be..214ccb4f42 100644 --- a/docs/templates/lint-checks.rst.j2 +++ b/docs/templates/lint-checks.rst.j2 @@ -34,7 +34,7 @@ Below you can find the list of available checks. 
See the - {{ linter.help }} {% endfor %} -{% endmacro%} +{% endmacro %} {{ emit_table('Test checks', TEST_LINTERS) }} {{ emit_table('Plan checks', PLAN_LINTERS) }} diff --git a/docs/templates/plugins.rst.j2 b/docs/templates/plugins.rst.j2 index 7f25a6ebc1..db01eef1b0 100644 --- a/docs/templates/plugins.rst.j2 +++ b/docs/templates/plugins.rst.j2 @@ -10,7 +10,7 @@ .. _plugins/{{ STEP }}/{{ PLUGIN_ID | trim }}: {{ PLUGIN_ID }} -{{ '-' * (PLUGIN_ID | length)}} +{{ '-' * (PLUGIN_ID | length) }} {# Emit the warning only for plugins that have not been reviewed yet. #} {% set plugin_full_id = STEP + "/" + PLUGIN_ID %} @@ -65,7 +65,7 @@ Configuration Default: *could not render default value correctly* {% endif %} {% endif %} -{%endmacro %} +{% endmacro %} {% set ignored_fields = container_ignored_fields(PLUGIN_DATA_CLASS) %} {% set inherited_fields = container_inherited_fields(PLUGIN_DATA_CLASS) | sort %} diff --git a/docs/templates/story.rst.j2 b/docs/templates/story.rst.j2 index 4e01aeedd4..59f3568956 100644 --- a/docs/templates/story.rst.j2 +++ b/docs/templates/story.rst.j2 @@ -219,7 +219,7 @@ {# Links pointing to websites #} {% elif link.target | match('^https?://') %} -* {{ printable_relation(link) }} `{{ link.target}} <{{ link.target }}>`_ +* {{ printable_relation(link) }} `{{ link.target }} <{{ link.target }}>`_ {# Links pointing to anything else #} {% else %} diff --git a/docs/templates/template-filters.rst.j2 b/docs/templates/template-filters.rst.j2 index ee0e2756bb..f47e1aa53d 100644 --- a/docs/templates/template-filters.rst.j2 +++ b/docs/templates/template-filters.rst.j2 @@ -25,7 +25,7 @@ __ https://jinja.palletsprojects.com/en/3.1.x/templates/#filters {% set filter_callable = TEMPLATES[filter_name] %} {{ filter_name }} -{{ '-' * (filter_name | length)}} +{{ '-' * (filter_name | length) }} {% if filter_callable.__doc__ %} {{ filter_callable.__doc__ | dedent | trim }} diff --git a/pyproject.toml b/pyproject.toml index 9c6587aa1e..fd5ade6893 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ dependencies = [ # F39 / PyPI "docutils>=0.16", # 0.16 is the current one available for RHEL9 "fmf>=1.3.0", "jinja2>=2.11.3", # 3.1.2 / 3.1.2 - "pint>=0.16.1,<0.20", # 0.16.1 / 0.19.x TODO: Pint 0.20 requires larger changes to tmt.hardware + "pint>=0.16.1", # 0.16.1 "pygments>=2.7.4", # 2.7.4 is the current one available for RHEL9 "requests>=2.25.1", # 2.28.2 / 2.31.0 "ruamel.yaml>=0.16.6", # 0.17.32 / 0.17.32 @@ -249,7 +249,8 @@ ignore = [ "tmt/convert.py", "tmt/lint.py", "tmt/queue.py", - "tmt/utils.py" + "tmt/utils.py", + "tmt/hardware.py", # pyright does not pick up pint's _typing.py or something :/ ] pythonVersion = "3.9" @@ -281,45 +282,59 @@ src = ["tmt", "tests"] target-version = "py39" lint.select = [ "F", # pyflakes - "E", # pycodestyle - "W", # pycodestyle + "E", # pycodestyle error + "W", # pycodestyle warning "I", # isort "N", # pep8-naming "D", # pydocstyle "UP", # pyupgrade - "B", # flake8-bugbear - "C4", # flake8-comprehensions "YTT", # flake8-2020 - "PT", # flake8-pytest-style - "RET", # flake8-return - "SIM", # flake8-simplify + "ASYNC", # flake8-async + "S", # flake8-bandit + "B", # flake8-bugbear + "A", # flake8-builtins "COM", # flake8-commas + "C4", # flake8-comprehensions "DTZ", # flake8-datetimez "T10", # flake8-debugger "EXE", # flake8-executable + "ISC", # flake8-implicit-str-concat + "ICN", # flake8-import-conventions + "LOG", # flake8-logging + "G", # flake8-logging-format "PIE", # flake8-pie + "PYI", # flake8-pyi + "PT", # flake8-pytest-style + "Q003", # 
avoidable-escaped-quote + "Q004", # unnecessary-escaped-quote "RSE", # flake8-raise + "RET", # flake8-return + "SIM", # flake8-simplify + "TID", # flake8-tidy-imports + "INT", # flake8-gettext "PGH", # pygrep-hooks "PLC", # pylint-convention "PLE", # pylint-error - "PLR01", # pylint-refactor - "PLR02", - "PLR04", - "PLR1", + "PLR", # pylint-refactor "RUF", # ruff "D", # pydocstyle ] lint.ignore = [ "B904", # Within an `except` clause, raise exceptions with `raise ... from err` or `raise ... "COM812", # Trailing comma missing - # tmt codebase uses `warn` by default - disabling the check, switching to - # `warning` can be done in an extra patch. - "G010", # `warn` is deprecated in favor of `warning` + "G004", # Logging statement uses f-string "PIE790", # Unnecessary `pass` statement "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLE1205", # Too many arguments for `logging` format string + "PLR09", # Too many branches/statements/arguments/returns + "PLR2004", # Magic value used in comparison "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` "RUF013", # PEP 484 prohibits implicit `Optional` + # flake8-bandit (S) https://docs.astral.sh/ruff/rules/#flake8-bandit-s + "S101", # Use of `assert` detected + "S603", # `subprocess` call: check for execution of untrusted input + "S607", # Starting a process with a partial executable path + "S105", # Possible hardcoded password assigned to: "PASS" # pydocstyle # TODO: the permanent list (drop this comment once the temporary list @@ -345,6 +360,17 @@ lint.ignore = [ "D415", # First line should end with a period, question mark, or exclamation point ] +lint.logger-objects = ["tmt.log.Logger"] + +[tool.ruff.lint.per-file-ignores] +# Less strict security checks in tests +"tests/unit*" = [ + "S604", # Function call with shell=True parameter identified, security issue + "S605", # Starting a process with a shell: seems safe, but may be changed in the future + "S318", # Using xml to parse untrusted data is known to be vulnerable to XML attacks + "S108", # Probable insecure usage of temporary file or directory: "{}" + ] + [tool.ruff.lint.flake8-bugbear] extend-immutable-calls = ["tmt.utils.field"] @@ -358,6 +384,9 @@ extend-immutable-calls = ["tmt.utils.field"] convention = "pep257" property-decorators = ["tmt.utils.cached_property"] +[tool.ruff.lint.flake8-builtins] +builtins-ignorelist = ["help", "format", "input", "filter", "copyright", "max"] + [tool.ruff.lint.isort] known-first-party = ["tmt"] @@ -371,3 +400,7 @@ markers = [ ignore-words = "docs/codespell.dic" exclude-file = "docs/codespell.ignore" skip = "tests/execute/weird/data/weird.txt,tests/lint/plan/data/invalid_attr.fmf,tests/lint/plan/test.sh" + +[tool.djlint] +use_gitignore=true +ignore="H005,H030,H031" diff --git a/tests/integration/test_nitrate.py b/tests/integration/test_nitrate.py index eb13e959a5..ae6c3379e6 100644 --- a/tests/integration/test_nitrate.py +++ b/tests/integration/test_nitrate.py @@ -11,10 +11,9 @@ import tmt.base import tmt.cli import tmt.log +from tests import CliRunner from tmt.utils import ConvertError, Path -from .. 
import CliRunner - # Prepare path to examples TEST_DIR = Path(__file__).parent @@ -24,8 +23,8 @@ class Base(RequreTestCase): def setUp(self): super().setUp() - self.tmpdir = Path(tempfile.mktemp(prefix=str(TEST_DIR))) - shutil.copytree(self.EXAMPLES, self.tmpdir) + self.tmpdir = Path(tempfile.mkdtemp(prefix=str(TEST_DIR))) + shutil.copytree(self.EXAMPLES, self.tmpdir, dirs_exist_ok=True) self.cwd = os.getcwd() self.runner_output = None diff --git a/tests/integration/test_polarion.py b/tests/integration/test_polarion.py index 3c412f0bb6..e5cfa780c2 100644 --- a/tests/integration/test_polarion.py +++ b/tests/integration/test_polarion.py @@ -4,9 +4,9 @@ from fmf import Tree import tmt.cli +from tests import CliRunner from tmt.identifier import ID_KEY -from .. import CliRunner from .test_nitrate import TEST_DIR, Base diff --git a/tests/pip/install.fmf b/tests/pip/install.fmf index b7a0b735bd..f212f4a135 100644 --- a/tests/pip/install.fmf +++ b/tests/pip/install.fmf @@ -5,6 +5,12 @@ require: - python3 - python3-devel tier: null +adjust: + when: distro == fedora-rawhide + result: xfail + # 'mini' should start passing once https://github.com/hgrecco/pint/issues/1969 is resolved + # if/once that happens, the xfail should be moved to 'full' only + because: "Un-installable dependencies on Python 3.13" /mini: summary: Ensure the minimal pip install works diff --git a/tests/precommit/main.fmf b/tests/precommit/main.fmf index e8bae07b4b..b24a42be9f 100644 --- a/tests/precommit/main.fmf +++ b/tests/precommit/main.fmf @@ -4,3 +4,9 @@ require: - git-core - tmt tier: 4 +adjust: + when: distro == fedora-rawhide + result: xfail + # Remove the xfail adjust once it starts passing. + # Dependent on https://github.com/crate-py/rpds/issues/72, PyO3 0.22 + because: "Un-installable dependencies on Python 3.13" diff --git a/tests/unit/provision/mrack/test_hw.py b/tests/unit/provision/mrack/test_hw.py index 27ae4c2375..ce6e985397 100644 --- a/tests/unit/provision/mrack/test_hw.py +++ b/tests/unit/provision/mrack/test_hw.py @@ -3,6 +3,7 @@ import pytest import tmt.utils +from tests.unit.test_hardware import FULL_HARDWARE_REQUIREMENTS from tmt.hardware import ( Hardware, Operator, @@ -20,8 +21,6 @@ operator_to_beaker_op, ) -from ...test_hardware import FULL_HARDWARE_REQUIREMENTS - @pytest.mark.parametrize( ('operator', 'value', 'expected'), @@ -145,6 +144,15 @@ def test_maximal_constraint(root_logger: Logger) -> None: {'or': []} ] }, + { + 'and': [ + {'or': []}, + {'or': []}, + {'or': []}, + {'or': []}, + {'or': []} + ] + }, { 'system': { 'memory': { diff --git a/tests/unit/provision/testcloud/test_hw.py b/tests/unit/provision/testcloud/test_hw.py index 4b9ce97c5e..1f2234b8f7 100644 --- a/tests/unit/provision/testcloud/test_hw.py +++ b/tests/unit/provision/testcloud/test_hw.py @@ -6,6 +6,7 @@ import pytest from testcloud.domain_configuration import DomainConfiguration, TPMConfiguration +from tests.unit import MATCH, assert_log from tmt.hardware import TPM_VERSION_ALLOWED_OPERATORS, Hardware, Operator from tmt.log import Logger from tmt.steps.provision.testcloud import ( @@ -16,8 +17,6 @@ import_testcloud, ) -from ... 
import MATCH, assert_log - import_testcloud() # These must be imported *after* importing testcloud @@ -154,8 +153,9 @@ def test_tpm_unsupported_version( assert_log( caplog, - message=MATCH(r"warn: Cannot apply hardware requirement 'tpm\.version: == 0\.0\.0', TPM version not supported."), # noqa: E501 - levelno=logging.WARN) + message=MATCH(r"warn: Cannot apply hardware requirement 'tpm\.version: == 0\.0\.0', " + r"TPM version not supported."), + levelno=logging.WARNING) @pytest.mark.parametrize( @@ -181,4 +181,4 @@ def test_tpm_unsupported_operator( assert_log( caplog, message=MATCH(rf"warn: Cannot apply hardware requirement 'tpm\.version: {op} 2\.0', operator not supported."), # noqa: E501 - levelno=logging.WARN) + levelno=logging.WARNING) diff --git a/tests/unit/test_base.py b/tests/unit/test_base.py index 59a53d270e..80fbe84000 100644 --- a/tests/unit/test_base.py +++ b/tests/unit/test_base.py @@ -8,11 +8,10 @@ import tmt import tmt.cli +from tests import CliRunner from tmt.base import FmfId, Link, LinkNeedle, Links, expand_node_data from tmt.utils import Path, SpecificationError -from .. import CliRunner - runner = CliRunner() diff --git a/tests/unit/test_cli.py b/tests/unit/test_cli.py index fcd4f1b9ed..e9dcdb357e 100644 --- a/tests/unit/test_cli.py +++ b/tests/unit/test_cli.py @@ -9,8 +9,7 @@ import tmt.cli import tmt.log - -from .. import CliRunner +from tests import CliRunner # Prepare path to examples PATH = os.path.dirname(os.path.realpath(__file__)) diff --git a/tests/unit/test_export_to_nitrate.py b/tests/unit/test_export_to_nitrate.py index 7e336e9267..9547d4da80 100644 --- a/tests/unit/test_export_to_nitrate.py +++ b/tests/unit/test_export_to_nitrate.py @@ -11,8 +11,8 @@ class NitrateExportAutomated(TestCase): def setUp(self): - self.tmp_dir = Path(tempfile.mktemp(prefix=str(TEST_DIR))) - shutil.copytree(TEST_DIR, self.tmp_dir) + self.tmp_dir = Path(tempfile.mkdtemp(prefix=str(TEST_DIR))) + shutil.copytree(TEST_DIR, self.tmp_dir, dirs_exist_ok=True) self.cwd = os.getcwd() self.dir_name = 'manual_test' diff --git a/tests/unit/test_hardware.py b/tests/unit/test_hardware.py index f1a254ecf3..98a544a237 100644 --- a/tests/unit/test_hardware.py +++ b/tests/unit/test_hardware.py @@ -169,6 +169,12 @@ def test_normalize_invalid_hardware( - avx - "= avx2" - "!= smep" + device: + device-name: '~ .*Thunderbolt.*' + device: 79 + vendor-name: '!= Intel' + vendor: "> 97" + driver: mc disk: - size: 40 GiB model-name: "~ WD 100G.*" @@ -235,6 +241,12 @@ def test_parse_maximal_constraint() -> None: - cpu.flag: contains avx - cpu.flag: contains avx2 - cpu.flag: not contains smep + - and: + - device.vendor: '> 97' + - device.device: == 79 + - device.vendor-name: '!= Intel' + - device.device-name: ~ .*Thunderbolt.* + - device.driver: == mc - and: - gpu.vendor: == 4318 - gpu.device: == 97 diff --git a/tests/unit/test_id.py b/tests/unit/test_id.py index 5d4c5f7ff1..681b210350 100644 --- a/tests/unit/test_id.py +++ b/tests/unit/test_id.py @@ -8,11 +8,10 @@ import tmt import tmt.cli import tmt.log +from tests import CliRunner from tmt.identifier import ID_KEY from tmt.utils import Path -from .. 
import CliRunner - runner = CliRunner() test_path = Path(__file__).parent / "id" root_logger = tmt.log.Logger.create() diff --git a/tests/unit/test_logging.py b/tests/unit/test_logging.py index 193a927b1b..a7559dbbef 100644 --- a/tests/unit/test_logging.py +++ b/tests/unit/test_logging.py @@ -40,7 +40,7 @@ def _exercise_logger( logger.debug('this is a debug message') logger.verbose('this is a verbose message') logger.info('this is just an info') - logger.warn('this is a warning') + logger.warning('this is a warning') logger.fail('this is a failure') captured = capsys.readouterr() @@ -76,7 +76,7 @@ def _exercise_logger( details_key='warn', details_value='this is a warning', details_logger_labels=labels, - levelno=logging.WARN) + levelno=logging.WARNING) assert_log( caplog, message=f'{prefix}fail: this is a failure', @@ -97,7 +97,7 @@ def test_creation(caplog: _pytest.logging.LogCaptureFixture, root_logger: Logger logger = Logger.create() assert logger._logger.name == 'tmt' - actual_logger = logging.Logger('3rd-party-app-logger') + actual_logger = logging.Logger('3rd-party-app-logger') # noqa: LOG001 logger = Logger.create(actual_logger) assert logger._logger is actual_logger diff --git a/tests/unit/test_report_junit.py b/tests/unit/test_report_junit.py index 104bf4e3da..ef092e74fe 100644 --- a/tests/unit/test_report_junit.py +++ b/tests/unit/test_report_junit.py @@ -52,9 +52,8 @@ def _compare_xml_node(tree_path: list[str], expected: xml.dom.Node, actual: xml. # Make sure node names do match. assert expected.nodeName == actual.nodeName, \ - f"Element name mismatch at {tree_path_joined}: " \ - f"expected {expected.nodeName}, " \ - f"found {actual.nodeName}" + (f"Element name mismatch at {tree_path_joined}: " + f"expected {expected.nodeName}, found {actual.nodeName}") # If nodes have the same tag, move on to attributes. Make sure both nodes # have the same set of attributes, with same respective values. @@ -65,16 +64,15 @@ def _compare_xml_node(tree_path: list[str], expected: xml.dom.Node, actual: xml. actual_attributes = sorted((actual.attributes or {}).items()) assert len(expected_attributes) == len(actual_attributes), \ - f"Attribute count mismatch at {tree_path_joined}: " \ - f"expected {len(expected_attributes)}, " \ - f"found {len(actual_attributes)}" + (f"Attribute count mismatch at {tree_path_joined}: " + f"expected {len(expected_attributes)}, found {len(actual_attributes)}") for (expected_name, expected_value), (actual_name, actual_value) in zip( expected_attributes, actual_attributes): - assert expected_name == actual_name, f"Attribute mismatch at {tree_path_joined}: " \ - f"expected {expected_name}=\"{expected_value}\"" - assert expected_value == actual_value, f"Attribute mismatch at {tree_path_joined}: " \ - f"found {actual_name}=\"{actual_value}\"" + assert expected_name == actual_name, (f'Attribute mismatch at {tree_path_joined}: ' + f'expected {expected_name}="{expected_value}"') + assert expected_value == actual_value, (f'Attribute mismatch at {tree_path_joined}: ' + f'found {actual_name}="{actual_value}"') # Hooray, attributes match. Dig deeper, how about children? 
# To compare children, use this very function to compare each child with @@ -94,9 +92,8 @@ def _valid_children(node: xml.dom.Node) -> list[xml.dom.Node]: actual_children = _valid_children(actual) assert len(expected_children) == len(actual_children), \ - f"Children count mismatch at {tree_path_joined}: " \ - f"expected {len(expected_children)}, " \ - f"found {len(actual_children)}" + (f"Children count mismatch at {tree_path_joined}: " + f"expected {len(expected_children)}, found {len(actual_children)}") return all( _compare_xml_node( diff --git a/tmt/base.py b/tmt/base.py index 375c61d285..8c10bfe0bd 100644 --- a/tmt/base.py +++ b/tmt/base.py @@ -940,9 +940,8 @@ def detect_unallowed_properties_with_pattern( return for bad_property in match.group(1).replace("'", '').replace(' ', '').split(','): - yield LinterOutcome.WARN, \ - f'key "{bad_property}" not recognized by schema,' \ - f' and does not match "{match.group(2)}" pattern' + yield LinterOutcome.WARN, (f'key "{bad_property}" not recognized by schema, ' + f'and does not match "{match.group(2)}" pattern') # A key value is not recognized. This is often a case with keys whose values are # limited by an enum, like `how`. Unfortunately, validator will record every mismatch @@ -1513,9 +1512,8 @@ def lint_require_type_field(self) -> LinterReturn: if not tmt.utils.is_key_origin(self.node, 'require') \ and all(dependency in metadata.get('require', []) for dependency in missing_type): - yield LinterOutcome.FAIL, \ - 'some library/file requirement are missing type, but inherited from test parent,' \ - ' please, fix manually' + yield LinterOutcome.FAIL, ('some library/file requirement are missing type, ' + 'but inherited from test parent, please, fix manually') return for dependency in metadata.get('require', []): @@ -2213,20 +2211,19 @@ def _lint_step(step: str) -> LinterReturn: continue if guest_names and guest_roles: - yield LinterOutcome.FAIL, \ - f"{step} phase '{phase.get('name')}' needs guest or role '{where}', " \ - f"guests {names_formatted} " \ - f"and roles {roles_formatted} were found" + yield (LinterOutcome.FAIL, + f"{step} phase '{phase.get('name')}' needs guest or role '{where}'," + f" guests {names_formatted} and roles {roles_formatted} were found") elif guest_names: - yield LinterOutcome.FAIL, \ - f"{step} phase '{phase.get('name')}' needs guest or role '{where}', " \ - f"guests {names_formatted} and no roles were found" + yield (LinterOutcome.FAIL, + f"{step} phase '{phase.get('name')}' needs guest or role " + f"'{where}', guests {names_formatted} and no roles were found") else: - yield LinterOutcome.FAIL, \ - f"{step} phase '{phase.get('name')}' needs guest or role '{where}', " \ - f"roles {roles_formatted} and no guests were found" + yield (LinterOutcome.FAIL, + f"{step} phase '{phase.get('name')}' needs guest or role " + f"'{where}', roles {roles_formatted} and no guests were found") yield from _lint_step('prepare') yield from _lint_step('execute') @@ -2824,8 +2821,9 @@ def _filters_conditions( # Links are in OR relation if links and all(not node.has_link(needle) for needle in links): continue - except BaseException: + except Exception as exc: # Handle broken link as not matching + self.debug(f'Invalid link ignored, exception was {exc}') continue # Exclude if any(node for expr in excludes if re.search(expr, node.name)): @@ -2920,24 +2918,12 @@ def name_filter(nodes: Iterable[fmf.Tree]) -> list[fmf.Tree]: Test(node=test, logger=self._logger.descend()) for test in self.tree.prune( keys=keys, sources=cmd_line_names)] - else: + elif 
not unique and names: # First let's build the list of test objects based on keys & names. # If duplicate test names are allowed, match test name/regexp # one-by-one and preserve the order of tests within a plan. - if not unique and names: - tests = [] - for name in names: - selected_tests = [ - Test( - node=test, - tree=self, - logger=logger.descend( - logger_name=test.get('name', None) - ) # .apply_verbosity_options(**self._options), - ) for test in name_filter(self.tree.prune(keys=keys, names=[name]))] - tests.extend(sorted(selected_tests, key=lambda test: test.order)) - # Otherwise just perform a regular key/name filtering - else: + tests = [] + for name in names: selected_tests = [ Test( node=test, @@ -2945,8 +2931,19 @@ def name_filter(nodes: Iterable[fmf.Tree]) -> list[fmf.Tree]: logger=logger.descend( logger_name=test.get('name', None) ) # .apply_verbosity_options(**self._options), - ) for test in name_filter(self.tree.prune(keys=keys, names=names))] - tests = sorted(selected_tests, key=lambda test: test.order) + ) for test in name_filter(self.tree.prune(keys=keys, names=[name]))] + tests.extend(sorted(selected_tests, key=lambda test: test.order)) + # Otherwise just perform a regular key/name filtering + else: + selected_tests = [ + Test( + node=test, + tree=self, + logger=logger.descend( + logger_name=test.get('name', None) + ) # .apply_verbosity_options(**self._options), + ) for test in name_filter(self.tree.prune(keys=keys, names=names))] + tests = sorted(selected_tests, key=lambda test: test.order) # Apply filters & conditions return self._filters_conditions( diff --git a/tmt/checks/watchdog.py b/tmt/checks/watchdog.py index de3d7172b1..136718c9f9 100644 --- a/tmt/checks/watchdog.py +++ b/tmt/checks/watchdog.py @@ -420,12 +420,12 @@ def before_test( guest_context: GuestContext = invocation.check_data[check.how] if check.ping and not isinstance(invocation.guest, PINGABLE_GUEST_CLASSES): - watchdog_logger.warn('Ping against this guest is not supported, disabling.') + watchdog_logger.warning('Ping against this guest is not supported, disabling.') check.ping = False if check.ssh_ping and not isinstance(invocation.guest, SSH_PINGABLE_GUEST_CLASSES): - watchdog_logger.warn('SSH ping against this guest is not supported, disabling.') + watchdog_logger.warning('SSH ping against this guest is not supported, disabling.') check.ssh_ping = False diff --git a/tmt/cli.py b/tmt/cli.py index 86f136a728..505cb6848f 100644 --- a/tmt/cli.py +++ b/tmt/cli.py @@ -5,8 +5,6 @@ import dataclasses import enum import re -import subprocess -import sys from collections.abc import Sequence from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union @@ -29,7 +27,7 @@ import tmt.trying import tmt.utils from tmt.options import Deprecated, create_options_decorator, option -from tmt.utils import Path, cached_property +from tmt.utils import Command, Path, cached_property if TYPE_CHECKING: from typing_extensions import Concatenate, ParamSpec @@ -2022,7 +2020,7 @@ def completion(**kwargs: Any) -> None: COMPLETE_SCRIPT = 'tmt-complete' -def setup_completion(shell: str, install: bool) -> None: +def setup_completion(shell: str, install: bool, context: Context) -> None: """ Setup completion based on the shell """ config = tmt.utils.Config() # Fish gets installed into its special location where it is automatically @@ -2033,13 +2031,20 @@ def setup_completion(shell: str, install: bool) -> None: else: script = Path(config.path) / f'{COMPLETE_SCRIPT}.{shell}' - # SIM115: Use context handler for opening files. 
Would not reduce complexity here. - out = open(script, 'w') if install else sys.stdout # noqa: SIM115 - subprocess.run(f'{COMPLETE_VARIABLE}={shell}_source tmt', - shell=True, stdout=out) + env_var = {COMPLETE_VARIABLE: f'{shell}_source'} + + logger = context.obj.logger + + completions = Command('tmt').run(env=tmt.utils.Environment.from_dict(env_var), + cwd=None, + logger=context.obj.logger + ).stdout + if not completions: + logger.warning("Unable to generate shell completion") + return if install: - out.close() + Path(script).write_text(completions) # If requested, modify .bashrc or .zshrc if shell != 'fish': config_path = Path(f'~/.{shell}rc').expanduser() @@ -2047,6 +2052,9 @@ def setup_completion(shell: str, install: bool) -> None: shell_config.write('\n# Generated by tmt\n') shell_config.write(f'source {script}') + else: + logger.info(completions) + @completion.command(name='bash') @pass_context @@ -2058,7 +2066,7 @@ def setup_completion(shell: str, install: bool) -> None: """) def completion_bash(context: Context, install: bool, **kwargs: Any) -> None: """ Setup shell completions for bash """ - setup_completion('bash', install) + setup_completion('bash', install, context) @completion.command(name='zsh') @@ -2071,7 +2079,7 @@ def completion_bash(context: Context, install: bool, **kwargs: Any) -> None: """) def completion_zsh(context: Context, install: bool, **kwargs: Any) -> None: """ Setup shell completions for zsh """ - setup_completion('zsh', install) + setup_completion('zsh', install, context) @completion.command(name='fish') @@ -2081,4 +2089,4 @@ def completion_zsh(context: Context, install: bool, **kwargs: Any) -> None: help="Persistently store the script to '~/.config/fish/completions/tmt.fish'.") def completion_fish(context: Context, install: bool, **kwargs: Any) -> None: """ Setup shell completions for fish """ - setup_completion('fish', install) + setup_completion('fish', install, context) diff --git a/tmt/convert.py b/tmt/convert.py index 3a983f6346..c13b61f319 100644 --- a/tmt/convert.py +++ b/tmt/convert.py @@ -86,11 +86,10 @@ def read_manual( for case_id in case_ids: testcase = nitrate.TestCase(case_id) if testcase.status.name != 'CONFIRMED' and not disabled: - log.debug( - testcase.identifier + ' skipped (testcase is not CONFIRMED).') + log.debug(f'{testcase.identifier} skipped (testcase is not CONFIRMED).') continue if testcase.script is not None and not with_script: - log.debug(testcase.identifier + ' skipped (script is not empty).') + log.debug(f'{testcase.identifier} skipped (script is not empty).') continue # Filename sanitization @@ -612,8 +611,8 @@ def target_content_build() -> list[str]: if parent.get(key) == test[key]: test.pop(key) - log.debug('Common metadata:\n' + format_value(common_data)) - log.debug('Individual metadata:\n' + format_value(individual_data)) + log.debug(f'Common metadata:\n{format_value(common_data)}') + log.debug(f'Individual metadata:\n{format_value(individual_data)}') return common_data, individual_data @@ -1007,16 +1006,15 @@ def read_nitrate_case( # Full 'Name Surname ' form if testcase.tester.name is not None: data['contact'] = f'{testcase.tester.name} <{testcase.tester.email}>' + elif makefile_data is None or 'contact' not in makefile_data: + # Otherwise use just the email address + data['contact'] = testcase.tester.email + # Use contact from Makefile if it's there and email matches + elif re.search(testcase.tester.email, makefile_data['contact']): + data['contact'] = makefile_data['contact'] else: - if makefile_data is None or 
'contact' not in makefile_data: - # Otherwise use just the email address - data['contact'] = testcase.tester.email - # Use contact from Makefile if it's there and email matches - elif re.search(testcase.tester.email, makefile_data['contact']): - data['contact'] = makefile_data['contact'] - else: - # Otherwise use just the email address - data['contact'] = testcase.tester.email + # Otherwise use just the email address + data['contact'] = testcase.tester.email echo(style('contact: ', fg='green') + data['contact']) # Environment if testcase.arguments: diff --git a/tmt/export/__init__.py b/tmt/export/__init__.py index e0bdf126c2..3820668018 100644 --- a/tmt/export/__init__.py +++ b/tmt/export/__init__.py @@ -407,16 +407,13 @@ def count_html_headings(heading: str) -> None: count_html_headings(sections_headings['Setup'][0]) count_html_headings(sections_headings['Cleanup'][0]) - warn_outside_test_section = 'Heading "{}" from the section "{}" is '\ - 'used \noutside of Test sections.' - warn_headings_not_in_pairs = 'The number of headings from the section' \ - ' "Step" - {}\ndoesn\'t equal to the ' \ - 'number of headings from the section \n' \ - '"Expect" - {} in the test section "{}"' - warn_required_section_is_absent = '"{}" section doesn\'t exist in ' \ - 'the Markdown file' - warn_unexpected_headings = 'Headings "{}" aren\'t expected in the ' \ - 'section "{}"' + warn_outside_test_section = \ + 'Heading "{}" from the section "{}" is used \noutside of Test sections.' + warn_headings_not_in_pairs = \ + ('The number of headings from the section "Step" - {}\ndoesn\'t equal to the ' + 'number of headings from the section \n"Expect" - {} in the test section "{}"') + warn_required_section_is_absent = '"{}" section doesn\'t exist in the Markdown file' + warn_unexpected_headings = 'Headings "{}" aren\'t expected in the section "{}"' def required_section_exists( section: list[str], diff --git a/tmt/export/nitrate.py b/tmt/export/nitrate.py index dd62cc2724..862c6e6334 100644 --- a/tmt/export/nitrate.py +++ b/tmt/export/nitrate.py @@ -264,8 +264,8 @@ def return_markdown_file() -> Optional[Path]: files = '\n'.join(os.listdir()) reg_exp = r'.+\.md$' md_files = re.findall(reg_exp, files, re.M) - fail_message = "in the current working directory.\n" \ - "Manual steps couldn't be exported" + fail_message = ("in the current working directory.\n" + "Manual steps couldn't be exported") if len(md_files) == 1: return Path.cwd() / str(md_files[0]) if not md_files: diff --git a/tmt/hardware.py b/tmt/hardware.py index fa8bb0fa34..cd93cd22e8 100644 --- a/tmt/hardware.py +++ b/tmt/hardware.py @@ -64,7 +64,9 @@ from typing_extensions import TypeAlias #: A type of values describing sizes of things like storage or RAM. - Size: TypeAlias = 'Quantity[int]' + # Note: type-hinting is a bit wonky with pyright + # https://github.com/hgrecco/pint/issues/1166 + Size: TypeAlias = Quantity #: Unit registry, used and shared by all code. UNITS = pint.UnitRegistry() @@ -1550,7 +1552,7 @@ def report_support( or check(constraint): continue - logger.warn( + logger.warning( f"Hardware requirement '{constraint.printable_name}' is not supported.") def format_variants(self) -> Iterator[str]: diff --git a/tmt/log.py b/tmt/log.py index b5d7849956..75b7d54840 100644 --- a/tmt/log.py +++ b/tmt/log.py @@ -31,6 +31,7 @@ import logging.handlers import os import sys +import warnings from typing import ( TYPE_CHECKING, Any, @@ -305,12 +306,11 @@ def format(self, record: logging.LogRecord) -> str: pass # Otherwise render the message. 
-        else:
-            if record.msg and record.args:
-                record.message = record.msg % record.args
+        elif record.msg and record.args:
+            record.message = record.msg % record.args
 
-            else:
-                record.message = record.msg
+        else:
+            record.message = record.msg
 
         # Original code from Formatter.format() - hard to inherit when overriding
         # Formatter.format()...
@@ -485,15 +485,15 @@ def __init__(
         self._decolorize_output = create_decolorizer(apply_colors_output)
 
     def __repr__(self) -> str:
-        return ''
+        return (f'')
 
     @property
     def labels_span(self) -> int:
@@ -797,13 +797,13 @@ def debug(
                 message_topic=topic)
             )
 
-    def warn(
+    def warning(
             self,
             message: str,
             shift: int = 0
            ) -> None:
         self._log(
-            logging.WARN,
+            logging.WARNING,
             LogRecordDetails(
                 key='warn',
                 value=message,
@@ -811,6 +811,14 @@
                 shift=shift)
             )
 
+    def warn(
+            self,
+            message: str,
+            shift: int = 0
+            ) -> None:
+        warnings.warn("Use `warning` instead", DeprecationWarning, stacklevel=2)
+        return self.warning(message, shift)
+
     def fail(
             self,
             message: str,
diff --git a/tmt/options.py b/tmt/options.py
index 811c0baa74..af45b91e08 100644
--- a/tmt/options.py
+++ b/tmt/options.py
@@ -71,7 +71,7 @@ def option(
         is_flag: bool = False,
         multiple: bool = False,
         count: bool = False,
-        type: Optional[Union[click.Choice, Any]] = None,
+        type: Optional[Union[click.Choice, Any]] = None,  # noqa: A002 `type` is shadowing a Python builtin
         help: Optional[str] = None,
         required: bool = False,
         default: Optional[Any] = None,
@@ -108,7 +108,7 @@
     help = deprecated.rendered
 
     if choices is not None:
-        type = click.Choice(choices)
+        type = click.Choice(choices)  # noqa: A001 `type` is shadowing a Python builtin
 
     # Add a metavar listing choices unless an explicit metavar has been provided
     if isinstance(type, click.Choice) and metavar is None:
diff --git a/tmt/plugins/__init__.py b/tmt/plugins/__init__.py
index 278d85e547..7c987e1718 100644
--- a/tmt/plugins/__init__.py
+++ b/tmt/plugins/__init__.py
@@ -342,7 +342,7 @@ def register_plugin(
            # TODO: would be raising an exception better? Probably, but since
            # plugin discovery happens in import time, it's very hard to manage
            # it. For now, report a warning, but do not raise an exception yet.
-            logger.warn(
+            logger.warning(
                f"Registering plugin '{plugin.__module__}' collides"
                f" with an already registered id '{plugin_id}'"
                f" of plugin '{self._plugins[plugin_id]}'.")
diff --git a/tmt/steps/__init__.py b/tmt/steps/__init__.py
index af7ac63eb5..1c2a607020 100644
--- a/tmt/steps/__init__.py
+++ b/tmt/steps/__init__.py
@@ -282,8 +282,6 @@ def pre_normalization(cls, raw_data: _RawStepData, logger: tmt.log.Logger) -> None:
 
     def post_normalization(self, raw_data: _RawStepData, logger: tmt.log.Logger) -> None:
         """ Called after normalization, useful for tweaking normalized data """
-        pass
-
     # ignore[override]: expected, we need to accept one extra parameter, `logger`.
     @classmethod
     def from_spec(  # type: ignore[override]
@@ -307,7 +305,7 @@
 
         return data
 
 
-class _RawWhereableStepData(TypedDict, total=False):
+class RawWhereableStepData(TypedDict, total=False):
     where: Union[str, list[str]]
 
@@ -1023,7 +1021,7 @@
         class (single class or tuple of classes).
         """
 
        if classes is None:
-            _classes: tuple[Union[type[Phase], type[PhaseT]], ...] = (Phase,)
+            _classes: tuple[type[Union[Phase, PhaseT]], ...]
= (Phase,) elif not isinstance(classes, tuple): _classes = (classes,) @@ -1084,7 +1082,7 @@ def prune(self, logger: tmt.log.Logger) -> None: else: shutil.rmtree(member) except OSError as error: - logger.warn(f"Unable to remove '{member}': {error}") + logger.warning(f"Unable to remove '{member}': {error}") class Method: @@ -1396,9 +1394,8 @@ def delegate( assert data is not None assert data.__class__ is plugin_data_class, \ - f'Data package is instance of {data.__class__.__name__}, ' \ - f'plugin {plugin_class.__name__} ' \ - f'expects {plugin_data_class.__name__}' + (f'Data package is instance of {data.__class__.__name__}, ' + f'plugin {plugin_class.__name__} expects {plugin_data_class.__name__}') plugin = plugin_class( logger=step._logger.descend(logger_name=None), @@ -1558,9 +1555,8 @@ def wake(self) -> None: """ assert self.data.__class__ is self._data_class, \ - f'Plugin {self.__class__.__name__} woken with incompatible ' \ - f'data {self.data}, ' \ - f'expects {self._data_class.__name__}' + (f'Plugin {self.__class__.__name__} woken with incompatible ' + f'data {self.data}, expects {self._data_class.__name__}') if self.step.status() == 'done': self.debug('step is done, not overwriting plugin data') @@ -1625,7 +1621,7 @@ def prune(self, logger: tmt.log.Logger) -> None: try: shutil.rmtree(self.workdir) except OSError as error: - logger.warn(f"Unable to remove '{self.workdir}': {error}") + logger.warning(f"Unable to remove '{self.workdir}': {error}") class GuestlessPlugin(BasePlugin[StepDataT]): @@ -2174,8 +2170,7 @@ def phase_name(self) -> str: @property def name(self) -> str: - return f'{self.phase_name} ' \ - f'on {fmf.utils.listed(self.guest_ids)}' + return f'{self.phase_name} on {fmf.utils.listed(self.guest_ids)}' def run_on_guest(self, guest: 'Guest', logger: tmt.log.Logger) -> None: self.phase.go(guest=guest, logger=logger) diff --git a/tmt/steps/discover/__init__.py b/tmt/steps/discover/__init__.py index c70e300b7f..1b09bb6d00 100644 --- a/tmt/steps/discover/__init__.py +++ b/tmt/steps/discover/__init__.py @@ -154,7 +154,6 @@ def log_import_plan_details(self) -> None: def post_dist_git(self, created_content: list[Path]) -> None: """ Discover tests after dist-git applied patches """ - pass class Discover(tmt.steps.Step): diff --git a/tmt/steps/discover/fmf.py b/tmt/steps/discover/fmf.py index a09c47561d..88fa1fbd9c 100644 --- a/tmt/steps/discover/fmf.py +++ b/tmt/steps/discover/fmf.py @@ -278,11 +278,11 @@ def is_in_standalone_mode(self) -> bool: return True return super().is_in_standalone_mode - def get_git_root(self, dir: Path) -> Path: + def get_git_root(self, directory: Path) -> Path: """ Find git root of the path """ output = self.run( Command("git", "rev-parse", "--show-toplevel"), - cwd=dir, + cwd=directory, ignore_dry=True) assert output.stdout is not None return Path(output.stdout.strip("\n")) diff --git a/tmt/steps/execute/__init__.py b/tmt/steps/execute/__init__.py index 6c1c60872e..208e33b62e 100644 --- a/tmt/steps/execute/__init__.py +++ b/tmt/steps/execute/__init__.py @@ -382,7 +382,7 @@ def handle_reboot(self) -> bool: raise except tmt.utils.ProvisionError: - self.logger.warn( + self.logger.warning( "Guest does not support soft reboot, trying hard reboot.") rebooted = self.guest.reboot(hard=True, timeout=timeout) diff --git a/tmt/steps/execute/internal.py b/tmt/steps/execute/internal.py index e3c21914ef..e665ff6b30 100644 --- a/tmt/steps/execute/internal.py +++ b/tmt/steps/execute/internal.py @@ -32,7 +32,7 @@ TEST_PIDFILE_LOCK_FILENAME = 
f'{TEST_PIDFILE_FILENAME}.lock' #: The default directory for storing test pid file. -TEST_PIDFILE_ROOT = Path('/var/tmp') +TEST_PIDFILE_ROOT = Path('/var/tmp') # noqa: S108 insecure usage of temporary dir def effective_pidfile_root() -> Path: @@ -393,7 +393,8 @@ def _save_process( if self.data.interactive: if test.duration: - logger.warn('Ignoring requested duration, not supported in interactive mode.') + logger.warning( + 'Ignoring requested duration, not supported in interactive mode.') timeout = None @@ -423,7 +424,7 @@ def _save_process( logger.debug(f"Test duration '{test.duration}' exceeded.") elif tmt.utils.ProcessExitCodes.is_pidfile(invocation.return_code): - logger.warn('Test failed to manage its pidfile.') + logger.warning('Test failed to manage its pidfile.') with invocation.process_lock: invocation.process = None diff --git a/tmt/steps/prepare/__init__.py b/tmt/steps/prepare/__init__.py index 9276a6f6d5..919f00cdce 100644 --- a/tmt/steps/prepare/__init__.py +++ b/tmt/steps/prepare/__init__.py @@ -47,7 +47,7 @@ class PrepareStepData(tmt.steps.WhereableStepData, tmt.steps.StepData): PrepareStepDataT = TypeVar('PrepareStepDataT', bound=PrepareStepData) -class _RawPrepareStepData(tmt.steps._RawStepData, tmt.steps._RawWhereableStepData, total=False): +class _RawPrepareStepData(tmt.steps._RawStepData, tmt.steps.RawWhereableStepData, total=False): pass diff --git a/tmt/steps/prepare/install.py b/tmt/steps/prepare/install.py index c303f1b11d..b59fecd437 100644 --- a/tmt/steps/prepare/install.py +++ b/tmt/steps/prepare/install.py @@ -152,19 +152,15 @@ def prepare_install_local(self) -> None: def install_from_repository(self) -> None: """ Default base install method for packages from repositories """ - pass def install_local(self) -> None: """ Default base install method for local packages """ - pass def install_from_url(self) -> None: """ Default base install method for packages which are from URL """ - pass def install_debuginfo(self) -> None: """ Default base install method for debuginfo packages """ - pass def install(self) -> None: """ Perform the actual installation """ diff --git a/tmt/steps/provision/__init__.py b/tmt/steps/provision/__init__.py index c9473de7a7..ccbca9f608 100644 --- a/tmt/steps/provision/__init__.py +++ b/tmt/steps/provision/__init__.py @@ -3,8 +3,8 @@ import datetime import enum import os -import random import re +import secrets import shlex import signal as _signal import string @@ -778,7 +778,7 @@ def _random_name(self, prefix: str = '', length: int = 16) -> str: # Append at least 5 random characters min_random_part = max(5, length - len(prefix)) name = prefix + ''.join( - random.choices(string.ascii_letters, k=min_random_part)) + secrets.choice(string.ascii_letters) for _ in range(min_random_part)) # Return tail (containing random characters) of name return name[-length:] @@ -956,11 +956,9 @@ def _ansible_playbook_path(self, playbook: Path) -> Path: self.debug(f"Applying playbook '{playbook}' on guest '{self.primary_address}'.") # FIXME: cast() - https://github.com/teemtee/tmt/issues/1372 parent = cast(Provision, self.parent) - assert parent.plan.my_run is not None # narrow type - assert parent.plan.my_run.tree is not None # narrow type - assert parent.plan.my_run.tree.root is not None # narrow type + assert parent.plan.fmf_root is not None # narrow type # Playbook paths should be relative to the metadata tree root - playbook = parent.plan.my_run.tree.root / playbook.unrooted() + playbook = parent.plan.fmf_root / playbook.unrooted() self.debug(f"Playbook 
full path: '{playbook}'", level=2) return playbook @@ -1391,11 +1389,10 @@ def _ssh_guest(self) -> str: def _ssh_master_socket_path(self) -> Path: """ Return path to the SSH master socket """ - # Use '/run/user/uid' if it exists, '/tmp' otherwise. + # Use '/run/user/uid' if it exists, 'temp dir' otherwise. run_dir = Path(f"/run/user/{os.getuid()}") - socket_dir = run_dir / "tmt" if run_dir.is_dir() else Path("/tmp") - socket_dir.mkdir(exist_ok=True) - return Path(tempfile.mktemp(dir=socket_dir)) + socket_dir = run_dir if run_dir.is_dir() else Path(tempfile.mkdtemp()) + return socket_dir / "tmt" @property def _ssh_options(self) -> Command: @@ -1481,7 +1478,7 @@ def _cleanup_ssh_master_process( self._ssh_master_process.wait(timeout=3) except subprocess.TimeoutExpired: - logger.warn( + logger.warning( f'Terminating the SSH master process {self._ssh_master_process.pid}' ' timed out.') diff --git a/tmt/steps/provision/mrack.py b/tmt/steps/provision/mrack.py index e11843f8d0..414604096b 100644 --- a/tmt/steps/provision/mrack.py +++ b/tmt/steps/provision/mrack.py @@ -204,7 +204,7 @@ def _transform_unsupported( # sure user is aware it would have no effect, and since we have to return # something, return an empty `or` group - no harm done, composable with other # elements. - logger.warn(f"Hardware requirement '{constraint.printable_name}' will have no effect.") + logger.warning(f"Hardware requirement '{constraint.printable_name}' will have no effect.") return MrackHWOrGroup() diff --git a/tmt/steps/provision/testcloud.py b/tmt/steps/provision/testcloud.py index c0d896f253..ff80b01471 100644 --- a/tmt/steps/provision/testcloud.py +++ b/tmt/steps/provision/testcloud.py @@ -412,12 +412,12 @@ def _apply_hw_tpm( for constraint in tpm_constraints: if constraint.operator not in TPM_VERSION_ALLOWED_OPERATORS: - logger.warn( + logger.warning( f"Cannot apply hardware requirement '{constraint}', operator not supported.") return if constraint.value not in TPM_VERSION_SUPPORTED_VERSIONS[TPM_CONFIG_ALLOWS_VERSIONS]: - logger.warn( + logger.warning( f"Cannot apply hardware requirement '{constraint}'," " TPM version not supported.") return @@ -1101,13 +1101,13 @@ def go(self) -> None: if data.memory is not None and data.hardware.constraint.uses_constraint( 'memory', self._logger): - self._logger.warn( + self._logger.warning( "Hardware requirement 'memory' is specified in 'hardware' key," " it will be overruled by 'memory' key.") if data.disk is not None and data.hardware.constraint.uses_constraint( 'disk.size', self._logger): - self._logger.warn( + self._logger.warning( "Hardware requirement 'disk.size' is specified in 'hardware' key," " it will be overruled by 'disk' key.") diff --git a/tmt/steps/report/html.py b/tmt/steps/report/html.py index dcb138c86d..7f44637940 100644 --- a/tmt/steps/report/html.py +++ b/tmt/steps/report/html.py @@ -57,7 +57,6 @@ class ReportHtml(tmt.steps.report.ReportPlugin[ReportHtmlData]): def prune(self, logger: tmt.log.Logger) -> None: """ Do not prune generated html report """ - pass def go(self) -> None: """ Process results """ diff --git a/tmt/steps/report/html/template.html.j2 b/tmt/steps/report/html/template.html.j2 index 88c118466b..7153110c41 100644 --- a/tmt/steps/report/html/template.html.j2 +++ b/tmt/steps/report/html/template.html.j2 @@ -1,4 +1,5 @@ + Test results of {{ plan.name }}