diff --git a/Makefile b/Makefile index 929aa005e8..677d684176 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ uninstall: $(PYTHON) setup.py develop --uninstall $(PYTHON_DEVELOP_ARGS) requirements-dev: pip - - $(PYTHON) -m pip install -r requirements-dev.txt $(PYTHON_DEVELOP_ARGS) + $(PYTHON) -m pip install -r requirements-dev.txt $(PYTHON_DEVELOP_ARGS) smokecheck: clean uninstall develop $(PYTHON) -m avocado run examples/tests/passtest.py diff --git a/avocado/core/dependencies/dependency.py b/avocado/core/dependencies/dependency.py new file mode 100644 index 0000000000..f9d2323321 --- /dev/null +++ b/avocado/core/dependencies/dependency.py @@ -0,0 +1,70 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# See LICENSE for more details. +# +# Copyright: Red Hat Inc. 2024 +# Authors: Jan Richter + +from avocado.core.nrunner.runnable import Runnable + + +class Dependency: + """ + Data holder for dependency. + """ + + def __init__(self, kind=None, uri=None, args=(), kwargs=None): + self._kind = kind + self._uri = uri + self._args = args + self._kwargs = kwargs or {} + + @property + def kind(self): + return self._kind + + @property + def uri(self): + return self._uri + + @property + def args(self): + return self._args + + @property + def kwargs(self): + return self._kwargs + + def __hash__(self): + return hash( + ( + self.kind, + self.uri, + tuple(sorted(self.args)), + tuple(sorted(self.kwargs.items())), + ) + ) + + def __eq__(self, other): + if isinstance(other, Dependency): + return hash(self) == hash(other) + return False + + def to_runnable(self, config): + return Runnable(self.kind, self.uri, *self.args, config=config, **self.kwargs) + + @classmethod + def from_dictionary(cls, dictionary): + return cls( + dictionary.pop("type", None), + dictionary.pop("uri", None), + dictionary.pop("args", ()), + dictionary, + ) diff --git a/avocado/core/nrunner/runnable.py b/avocado/core/nrunner/runnable.py index 5cffdeadb0..7b49ec937a 100644 --- a/avocado/core/nrunner/runnable.py +++ b/avocado/core/nrunner/runnable.py @@ -80,7 +80,7 @@ def __init__(self, kind, uri, *args, config=None, **kwargs): #: attr:`avocado.core.nrunner.runner.BaseRunner.CONFIGURATION_USED` self._config = {} if config is None: - config = self.filter_runnable_config(kind, {}) + config = self.add_configuration_used(kind, {}) self.config = config or {} self.args = args self.tags = kwargs.pop("tags", None) @@ -176,10 +176,10 @@ def config(self, config): configuration_used = Runnable.get_configuration_used_by_kind(self.kind) if not set(configuration_used).issubset(set(config.keys())): LOG.warning( - "The runnable config should have only values " - "essential for its runner. In the next version of " - "avocado, this will raise a Value Error. Please " - "use avocado.core.nrunner.runnable.Runnable.filter_runnable_config " + "The runnable config should have values essential for its runner. " + "In this case, it's missing some of the used configuration. In a " + "future avocado version this will raise a ValueError. 
Please " + "use avocado.core.nrunner.runnable.Runnable.add_configuration_used " "or avocado.core.nrunner.runnable.Runnable.from_avocado_config" ) self._config = config @@ -221,7 +221,7 @@ def from_avocado_config(cls, kind, uri, *args, config=None, **kwargs): """Creates runnable with only essential config for runner of specific kind.""" if not config: config = {} - config = cls.filter_runnable_config(kind, config) + config = cls.add_configuration_used(kind, config) return cls(kind, uri, *args, config=config, **kwargs) @classmethod @@ -245,30 +245,30 @@ def get_configuration_used_by_kind(cls, kind): return configuration_used @classmethod - def filter_runnable_config(cls, kind, config): + def add_configuration_used(cls, kind, config): """ - Returns only essential values for specific runner. + Adds essential configuration values for specific runner. - It will use configuration from argument completed by values from - config file and avocado default configuration. + It will add missing configuration in the given config, + complementing it with values from config file and avocado default + configuration. :param kind: Kind of runner which should use the configuration. :type kind: str - :param config: Configuration values for runner. If some values will be - missing the default ones and from config file will be - used. + :param config: Configuration values for runner. If any used configuration + values are missing, the default ones and from config file + will be used. :type config: dict - :returns: Config dict, which has only values essential for runner - based on STANDALONE_EXECUTABLE_CONFIG_USED + :returns: Config dict, which has existing entries plus values + essential for runner based on + STANDALONE_EXECUTABLE_CONFIG_USED :rtype: dict """ whole_config = settings.as_dict() - filtered_config = {} for config_item in cls.get_configuration_used_by_kind(kind): - filtered_config[config_item] = config.get( - config_item, whole_config.get(config_item) - ) - return filtered_config + if config_item not in config: + config[config_item] = whole_config.get(config_item) + return config def get_command_args(self): """ diff --git a/avocado/core/nrunner/task.py b/avocado/core/nrunner/task.py index 7844927177..a40bf4a0e6 100644 --- a/avocado/core/nrunner/task.py +++ b/avocado/core/nrunner/task.py @@ -40,29 +40,46 @@ class TaskStatusService: def __init__(self, uri): self.uri = uri - self.connection = None + self._connection = None - def post(self, status): + @property + def connection(self): + if not self._connection: + self._create_connection() + return self._connection + + def _create_connection(self): + """ + Creates connection with `self.uri` based on `socket.create_connection` + """ if ":" in self.uri: host, port = self.uri.split(":") port = int(port) - if self.connection is None: - for _ in range(600): - try: - self.connection = socket.create_connection((host, port)) - break - except ConnectionRefusedError as error: - LOG.warning(error) - time.sleep(1) - else: - self.connection = socket.create_connection((host, port)) + for _ in range(600): + try: + self._connection = socket.create_connection((host, port)) + break + except ConnectionRefusedError as error: + LOG.warning(error) + time.sleep(1) + else: + self._connection = socket.create_connection((host, port)) else: - if self.connection is None: - self.connection = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.connection.connect(self.uri) + self._connection = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self._connection.connect(self.uri) + 
def post(self, status): data = json_dumps(status) - self.connection.send(data.encode("ascii") + "\n".encode("ascii")) + try: + self.connection.send(data.encode("ascii") + "\n".encode("ascii")) + except BrokenPipeError: + try: + self._create_connection() + self.connection.send(data.encode("ascii") + "\n".encode("ascii")) + except ConnectionRefusedError: + LOG.warning(f"Connection with {self.uri} has been lost.") + return False + return True def close(self): if self.connection is not None: @@ -203,12 +220,23 @@ def run(self): self.setup_output_dir() runner_klass = self.runnable.pick_runner_class() runner = runner_klass() + running_status_services = self.status_services + damaged_status_services = [] for status in runner.run(self.runnable): if status["status"] == "started": status.update({"output_dir": self.runnable.output_dir}) status.update({"id": self.identifier}) if self.job_id is not None: status.update({"job_id": self.job_id}) - for status_service in self.status_services: - status_service.post(status) + for status_service in running_status_services: + if not status_service.post(status): + damaged_status_services.append(status_service) + if damaged_status_services: + running_status_services = list( + filter( + lambda s: s not in damaged_status_services, + running_status_services, + ) + ) + damaged_status_services.clear() yield status diff --git a/avocado/core/safeloader/docstring.py b/avocado/core/safeloader/docstring.py index 66f4e08ef3..0e4fa7f698 100644 --- a/avocado/core/safeloader/docstring.py +++ b/avocado/core/safeloader/docstring.py @@ -1,6 +1,8 @@ import json import re +from avocado.core.dependencies.dependency import Dependency + #: Gets the docstring directive value from a string. Used to tweak #: test behavior in various ways DOCSTRING_DIRECTIVE_RE_RAW = ( @@ -78,7 +80,9 @@ def get_docstring_directives_dependencies(docstring): if item.startswith("dependency="): _, dependency_str = item.split("dependency=", 1) try: - dependencies.append(json.loads(dependency_str)) + dependencies.append( + Dependency.from_dictionary(json.loads(dependency_str)) + ) except json.decoder.JSONDecodeError: # ignore dependencies in case of malformed dictionary continue diff --git a/avocado/plugins/dependency.py b/avocado/plugins/dependency.py index 7ee2b0f814..7516ca8d02 100644 --- a/avocado/plugins/dependency.py +++ b/avocado/plugins/dependency.py @@ -12,7 +12,6 @@ # Copyright: Red Hat Inc. 
2021 # Authors: Willian Rampazzo -from avocado.core.nrunner.runnable import Runnable from avocado.core.plugin_interfaces import PreTest @@ -33,15 +32,7 @@ def pre_test_runnables(test_runnable, suite_config=None): # pylint: disable=W02 if not test_runnable.dependencies: return [] dependency_runnables = [] - for dependency in test_runnable.dependencies: - # make a copy to change the dictionary and do not affect the - # original `dependencies` dictionary from the test - dependency_copy = dependency.copy() - kind = dependency_copy.pop("type") - uri = dependency_copy.pop("uri", None) - args = dependency_copy.pop("args", ()) - dependency_runnable = Runnable( - kind, uri, *args, config=test_runnable.config, **dependency_copy - ) - dependency_runnables.append(dependency_runnable) + unique_dependencies = list(dict.fromkeys(test_runnable.dependencies)) + for dependency in unique_dependencies: + dependency_runnables.append(dependency.to_runnable(test_runnable.config)) return dependency_runnables diff --git a/avocado/plugins/runner_nrunner.py b/avocado/plugins/runner_nrunner.py index 537ffc1c6b..856afb2306 100644 --- a/avocado/plugins/runner_nrunner.py +++ b/avocado/plugins/runner_nrunner.py @@ -222,7 +222,7 @@ def _update_avocado_configuration_used_on_runnables(runnables, config): :type config: dict """ for runnable in runnables: - runnable.config = Runnable.filter_runnable_config(runnable.kind, config) + runnable.config = Runnable.add_configuration_used(runnable.kind, config) def _determine_status_server(self, test_suite, config_key): if test_suite.config.get("run.status_server_auto"): diff --git a/avocado/utils/archive.py b/avocado/utils/archive.py index 9bd93a6138..edf901562d 100644 --- a/avocado/utils/archive.py +++ b/avocado/utils/archive.py @@ -116,7 +116,13 @@ def is_zstd_file(path): return zstd_file.read(len(ZSTD_MAGIC)) == ZSTD_MAGIC -def _probe_zstd_cmd(): +def probe_zstd_cmd(): + """ + Attempts to find a suitable zstd tool that behaves as expected + + :rtype: str or None + :returns: path to a suitable zstd executable or None if not found + """ zstd_cmd = shutil.which("zstd") if zstd_cmd is not None: proc = subprocess.run( @@ -136,7 +142,7 @@ def zstd_uncompress(path, output_path=None, force=False): """ Extracts a zstd compressed file. 
""" - zstd_cmd = _probe_zstd_cmd() + zstd_cmd = probe_zstd_cmd() if not zstd_cmd: raise ArchiveException("Unable to find a suitable zstd compression tool") output_path = _decide_on_path(path, ".zst", output_path) diff --git a/optional_plugins/html/tests/html_result.py b/optional_plugins/html/tests/html_result.py index 65f9fc5b3f..f51906010c 100644 --- a/optional_plugins/html/tests/html_result.py +++ b/optional_plugins/html/tests/html_result.py @@ -82,8 +82,8 @@ def test_output_incompatible_setup(self): def test_output_compatible_setup_2(self): prefix = "avocado_" + __name__ - tmpfile = tempfile.mktemp(prefix=prefix, dir=self.tmpdir.name) - tmpfile2 = tempfile.mktemp(prefix=prefix, dir=self.tmpdir.name) + tmpfile = os.path.join(self.tmpdir.name, f"{prefix}_result.xml") + tmpfile2 = os.path.join(self.tmpdir.name, f"{prefix}_result.json") tmpdir = tempfile.mkdtemp(prefix=prefix, dir=self.tmpdir.name) tmpfile3 = os.path.join(tmpdir, "result.html") cmd_line = ( diff --git a/requirements-dev.txt b/requirements-dev.txt index 12a588712b..f76fab4b53 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -10,7 +10,7 @@ black==22.3.0 coverage==5.5 # To run make check -psutil==5.8.0 +psutil==5.9.5 # pycdlib is an optional requirement in production # but is necessary for selftests diff --git a/selftests/check.py b/selftests/check.py index 0e516511de..693c6cb022 100755 --- a/selftests/check.py +++ b/selftests/check.py @@ -26,11 +26,11 @@ "job-api-6": 4, "job-api-7": 1, "nrunner-interface": 70, - "nrunner-requirement": 16, - "unit": 667, + "nrunner-requirement": 20, + "unit": 668, "jobs": 11, - "functional-parallel": 297, - "functional-serial": 4, + "functional-parallel": 298, + "functional-serial": 5, "optional-plugins": 0, "optional-plugins-golang": 2, "optional-plugins-html": 3, diff --git a/selftests/functional/basic.py b/selftests/functional/basic.py index 7a9679e556..c0875e7a76 100644 --- a/selftests/functional/basic.py +++ b/selftests/functional/basic.py @@ -819,7 +819,9 @@ def check_matplotlib_logs(file_path): self.assertTrue(os.path.exists(file_path)) with open(file_path, encoding="utf-8") as file: stream = file.read() - self.assertIn("matplotlib __init__ L0337 DEBUG|", stream) + self.assertTrue( + re.match(r"matplotlib __init__ L[0-9]* DEBUG|", stream) + ) log_dir = os.path.join(self.tmpdir.name, "latest") test_log_dir = os.path.join( diff --git a/selftests/functional/nrunner.py b/selftests/functional/nrunner.py index 0601fa7bc2..33bc84c276 100644 --- a/selftests/functional/nrunner.py +++ b/selftests/functional/nrunner.py @@ -1,5 +1,6 @@ import os import sys +import time import unittest from avocado.core.job import Job @@ -265,6 +266,25 @@ def test_recipe_exec_test_3(self): self.assertEqual(res.exit_status, 0) +class TaskRunStatusService(TestCaseTmpDir): + @skipUnlessPathExists("/bin/sleep") + @skipUnlessPathExists("/bin/nc") + def test_task_status_service_lost(self): + nc_path = os.path.join(self.tmpdir.name, "socket") + nc_proc = process.SubProcess(f"nc -lU {nc_path}") + nc_proc.start() + task_proc = process.SubProcess( + f"avocado-runner-exec-test task-run -i 1 -u /bin/sleep -a 3 -s {nc_path}" + ) + task_proc.start() + time.sleep(1) + nc_proc.kill() + time.sleep(1) + self.assertIn( + f"Connection with {nc_path} has been lost.".encode(), task_proc.get_stderr() + ) + + class ResolveSerializeRun(TestCaseTmpDir): @skipUnlessPathExists("/bin/true") def test(self): diff --git a/selftests/functional/output.py b/selftests/functional/output.py index a3f4fe6fef..30482305bb 100644 --- 
a/selftests/functional/output.py +++ b/selftests/functional/output.py @@ -2,7 +2,6 @@ import os import re import shlex -import tempfile import unittest from xml.dom import minidom @@ -362,7 +361,7 @@ def test_output_incompatible_setup(self): ) def test_output_compatible_setup(self): - tmpfile = tempfile.mktemp(dir=self.tmpdir.name) + tmpfile = os.path.join(self.tmpdir.name, f"avocado_{__name__}.xml") cmd_line = ( f"{AVOCADO} run --job-results-dir {self.tmpdir.name} " f"--disable-sysinfo --journal --xunit {tmpfile} " @@ -380,7 +379,7 @@ def test_output_compatible_setup(self): minidom.parse(tmpfile) def test_output_compatible_setup_2(self): - tmpfile = tempfile.mktemp(dir=self.tmpdir.name) + tmpfile = os.path.join(self.tmpdir.name, f"avocado_{__name__}.json") cmd_line = ( f"{AVOCADO} run --job-results-dir {self.tmpdir.name} " f"--disable-sysinfo --xunit - --json {tmpfile} " @@ -401,8 +400,8 @@ def test_output_compatible_setup_2(self): minidom.parseString(result.stdout_text) def test_output_compatible_setup_nooutput(self): - tmpfile = tempfile.mktemp(dir=self.tmpdir.name) - tmpfile2 = tempfile.mktemp(dir=self.tmpdir.name) + tmpfile = os.path.join(self.tmpdir.name, f"avocado_{__name__}.xml") + tmpfile2 = os.path.join(self.tmpdir.name, f"avocado_{__name__}.json") # Verify --show=none can be supplied as app argument cmd_line = ( f"{AVOCADO} --show=none run " @@ -490,7 +489,7 @@ def test_silent_trumps_test(self): self.assertEqual(result.stdout, b"") def test_verify_whiteboard_save(self): - tmpfile = tempfile.mktemp(dir=self.tmpdir.name) + tmpfile = os.path.join(self.tmpdir.name, f"avocado_{__name__}.json") config = os.path.join(self.tmpdir.name, "conf.ini") content = ( "[datadir.paths]\nlogs_dir = %s" # pylint: disable=C0209 @@ -527,7 +526,7 @@ def test_verify_whiteboard_save(self): ) def test_gendata(self): - tmpfile = tempfile.mktemp(dir=self.tmpdir.name) + tmpfile = os.path.join(self.tmpdir.name, f"avocado_{__name__}.json") cmd_line = ( f"{AVOCADO} run --job-results-dir {self.tmpdir.name} " f"--disable-sysinfo " @@ -555,7 +554,9 @@ def test_gendata(self): ) def test_redirect_output(self): - redirected_output_path = tempfile.mktemp(dir=self.tmpdir.name) + redirected_output_path = os.path.join( + self.tmpdir.name, f"avocado_{__name__}_output" + ) cmd_line = ( f"{AVOCADO} run --job-results-dir {self.tmpdir.name} " f"--disable-sysinfo examples/tests/passtest.py > {redirected_output_path}" diff --git a/selftests/functional/serial/requirements.py b/selftests/functional/serial/requirements.py index 6285bc6823..c194e94c96 100644 --- a/selftests/functional/serial/requirements.py +++ b/selftests/functional/serial/requirements.py @@ -93,6 +93,26 @@ def test_c(self): """ ''' +SINGLE_SUCCESS_DUPLCITIES = '''from avocado import Test +from avocado.utils import process + + +class SuccessTest(Test): + + def check_hello(self): + result = process.run("hello", ignore_status=True) + self.assertEqual(result.exit_status, 0) + self.assertIn('Hello, world!', result.stdout_text,) + + def test_a(self): + """ + :avocado: dependency={"type": "package", "name": "hello"} + :avocado: dependency={"type": "package", "name": "hello"} + :avocado: dependency={"type": "package", "name": "hello"} + """ + self.check_hello() +''' + class BasicTest(TestCaseTmpDir, Test): @@ -223,6 +243,20 @@ def test_multiple_fails(self): result.stdout_text, ) + @skipUnless(os.getenv("CI"), skip_install_message) + def test_dependency_duplicates(self): + with script.Script( + os.path.join(self.tmpdir.name, "test_single_success.py"), + 
SINGLE_SUCCESS_DUPLCITIES, + ) as test: + command = self.get_command(test.path) + result = process.run(command, ignore_status=True) + self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK) + self.assertIn( + "PASS 1", + result.stdout_text, + ) + if __name__ == "__main__": unittest.main() diff --git a/selftests/style.sh b/selftests/style.sh index 18ad91b79f..511bccf09a 100755 --- a/selftests/style.sh +++ b/selftests/style.sh @@ -1,4 +1,4 @@ #!/bin/sh -e echo "** Running black..." -black --check --diff --color . +python3 -m black --check --diff --color . diff --git a/selftests/unit/dependencies_resolver.py b/selftests/unit/dependencies_resolver.py index 8c9788dd36..c5b5b7a1ef 100644 --- a/selftests/unit/dependencies_resolver.py +++ b/selftests/unit/dependencies_resolver.py @@ -1,5 +1,6 @@ import unittest +from avocado.core.dependencies.dependency import Dependency from avocado.core.nrunner.runnable import Runnable from avocado.plugins.dependency import DependencyResolver @@ -12,8 +13,8 @@ def test_dependencies_runnables(self): kind="package", uri=None, dependencies=[ - {"type": "package", "name": "foo"}, - {"type": "package", "name": "bar"}, + Dependency("package", kwargs={"name": "foo"}), + Dependency("package", kwargs={"name": "bar"}), ], ) dependency_runnables = DependencyResolver.pre_test_runnables(runnable) diff --git a/selftests/unit/nrunner.py b/selftests/unit/nrunner.py index 8cd1957e6e..2c3133cb67 100644 --- a/selftests/unit/nrunner.py +++ b/selftests/unit/nrunner.py @@ -77,6 +77,23 @@ def test_recipe_exec(self): self.assertEqual(runnable.args, ("/etc/profile",)) self.assertEqual(runnable.kwargs, {"TERM": "vt3270"}) + def test_recipe_config(self): + open_mocked = unittest.mock.mock_open( + read_data=( + '{"kind": "exec-test", "uri": "/bin/sh", ' + '"args": ["/etc/profile"], ' + '"config": {"runner.identifier_format": "{uri}-{args[0]}"}}' + ) + ) + with unittest.mock.patch("builtins.open", open_mocked): + runnable = Runnable.from_recipe("fake_path") + configuration_used = ["run.keep_tmp", "runner.exectest.exitcodes.skip"] + for conf in configuration_used: + self.assertIn(conf, runnable.config) + self.assertEqual( + runnable.config.get("runner.identifier_format"), "{uri}-{args[0]}" + ) + def test_identifier_args(self): config = {"runner.identifier_format": "{uri}-{args[0]}"} runnable = Runnable("exec-test", "uri", "arg1", "arg2", config=config) @@ -321,18 +338,18 @@ def test_runner_python_unittest_skip(self): runner_klass = runnable.pick_runner_class() runner = runner_klass() results = [status for status in runner.run(runnable)] - if sys.version_info < (3, 12, 1): + if sys.version_info == (3, 12, 1): output1 = ( b"----------------------------------------------------------------------\n" - b"Ran 1 test in " + b"Ran 0 tests in " ) - output2 = b"s\n\nOK (skipped=1)\n" + output2 = b"\n\nNO TESTS RAN (skipped=1)\n" else: output1 = ( b"----------------------------------------------------------------------\n" - b"Ran 0 tests in " + b"Ran 1 test in " ) - output2 = b"\n\nNO TESTS RAN (skipped=1)\n" + output2 = b"s\n\nOK (skipped=1)\n" output = results[-2] result = results[-1] self.assertEqual(result["status"], "finished") diff --git a/selftests/unit/safeloader_docstring.py b/selftests/unit/safeloader_docstring.py index 2c0e8b30ba..3545fcf88c 100644 --- a/selftests/unit/safeloader_docstring.py +++ b/selftests/unit/safeloader_docstring.py @@ -1,5 +1,6 @@ import unittest +from avocado.core.dependencies.dependency import Dependency from avocado.core.safeloader.docstring import ( 
DOCSTRING_DIRECTIVE_RE, check_docstring_directive, @@ -165,12 +166,12 @@ def test_get_dependency_empty(self): def test_dependency_single(self): raw = ':avocado: dependency={"foo":"bar"}' - exp = [{"foo": "bar"}] + exp = [Dependency(kwargs={"foo": "bar"})] self.assertEqual(get_docstring_directives_dependencies(raw), exp) def test_dependency_double(self): raw = ':avocado: dependency={"foo":"bar"}\n:avocado: dependency={"uri":"http://foo.bar"}' - exp = [{"foo": "bar"}, {"uri": "http://foo.bar"}] + exp = [Dependency(kwargs={"foo": "bar"}), Dependency(uri="http://foo.bar")] self.assertEqual(get_docstring_directives_dependencies(raw), exp) def test_directives_regex(self): diff --git a/selftests/unit/utils/archive.py b/selftests/unit/utils/archive.py index 1be2b06219..214c0231be 100644 --- a/selftests/unit/utils/archive.py +++ b/selftests/unit/utils/archive.py @@ -7,7 +7,7 @@ from avocado.utils import archive, crypto, data_factory from selftests.utils import BASEDIR, temp_dir_prefix -ZSTD_AVAILABLE = archive._probe_zstd_cmd() is not None +ZSTD_AVAILABLE = archive.probe_zstd_cmd() is not None class ArchiveTest(unittest.TestCase):
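
Note (not part of the patch): a minimal usage sketch of the new Dependency data holder, assuming the avocado package from this series is importable. Equal dependency entries hash identically, which is what lets DependencyResolver.pre_test_runnables() deduplicate them with dict.fromkeys() as done in avocado/plugins/dependency.py above. The package names are illustrative only.

from avocado.core.dependencies.dependency import Dependency

# Three docstring-style entries, two of them identical duplicates.
deps = [
    Dependency.from_dictionary({"type": "package", "name": "hello"}),
    Dependency.from_dictionary({"type": "package", "name": "hello"}),
    Dependency.from_dictionary({"type": "package", "name": "bar"}),
]
# dict.fromkeys() keeps insertion order and drops duplicates via __hash__/__eq__.
unique = list(dict.fromkeys(deps))
assert len(unique) == 2
assert unique[0].kind == "package" and unique[0].kwargs == {"name": "hello"}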
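
Note (not part of the patch): a hedged sketch of the renamed Runnable.add_configuration_used() contract: caller-provided values are preserved and only the missing "configuration used" keys are filled in from the settings defaults. The exec-test keys shown follow the new test_recipe_config unit test; running this assumes a normal avocado configuration is loadable.

from avocado.core.nrunner.runnable import Runnable

config = {"runner.identifier_format": "{uri}-{args[0]}"}
config = Runnable.add_configuration_used("exec-test", config)
# The explicit value survives instead of being filtered out...
assert config["runner.identifier_format"] == "{uri}-{args[0]}"
# ...and the keys the exec-test runner declares as used are now present.
assert "run.keep_tmp" in config
assert "runner.exectest.exitcodes.skip" in config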