diff --git a/avocado/plugins/list.py b/avocado/plugins/list.py
index dace3fcbe5..2cf67e26d1 100644
--- a/avocado/plugins/list.py
+++ b/avocado/plugins/list.py
@@ -214,6 +214,21 @@ def configure(self, parser):
             allow_multiple=True,
         )
 
+        settings.add_argparser_to_option(
+            namespace="resolver.run_executables",
+            parser=parser,
+            long_arg="--resolver-run-executables",
+            allow_multiple=True,
+        )
+
+        settings.add_argparser_to_option(
+            namespace="resolver.exec_runnables_recipe.arguments",
+            metavar="ARGS",
+            parser=parser,
+            long_arg="--resolver-exec-arguments",
+            allow_multiple=True,
+        )
+
         help_msg = "Writes runnable recipe files to a directory."
         settings.register_option(
             section="list.recipes",
diff --git a/avocado/plugins/resolvers.py b/avocado/plugins/resolvers.py
index ea83206de1..1ff1c3761d 100644
--- a/avocado/plugins/resolvers.py
+++ b/avocado/plugins/resolvers.py
@@ -19,10 +19,12 @@
 import json
 import os
 import re
+import shlex
+import subprocess
 
 from avocado.core.extension_manager import PluginPriority
 from avocado.core.nrunner.runnable import Runnable
-from avocado.core.plugin_interfaces import Resolver
+from avocado.core.plugin_interfaces import Init, Resolver
 from avocado.core.references import reference_split
 from avocado.core.resolver import (
     ReferenceResolution,
@@ -31,16 +33,12 @@
     get_file_assets,
 )
 from avocado.core.safeloader import find_avocado_tests, find_python_unittests
+from avocado.core.settings import settings
 
 
-class ExecTestResolver(Resolver):
-
-    name = "exec-test"
-    description = "Test resolver for executable files to be handled as tests"
-    priority = PluginPriority.VERY_LOW
-
-    def resolve(self, reference):
-
+class BaseExec:
+    @staticmethod
+    def check_exec(reference):
         criteria_check = check_file(
             reference,
             reference,
@@ -52,6 +50,18 @@
         if criteria_check is not True:
             return criteria_check
 
+
+class ExecTestResolver(BaseExec, Resolver):
+
+    name = "exec-test"
+    description = "Test resolver for executable files to be handled as tests"
+    priority = PluginPriority.VERY_LOW
+
+    def resolve(self, reference):
+        exec_criteria = self.check_exec(reference)
+        if exec_criteria is not None:
+            return exec_criteria
+
         runnable = Runnable("exec-test", reference, assets=get_file_assets(reference))
         return ReferenceResolution(
             reference, ReferenceResolutionResult.SUCCESS, [runnable]
@@ -121,24 +131,16 @@
         )
 
 
-class TapResolver(Resolver):
+class TapResolver(BaseExec, Resolver):
 
     name = "tap"
     description = "Test resolver for executable files to be handled as TAP tests"
     priority = PluginPriority.LAST_RESORT
 
     def resolve(self, reference):
-
-        criteria_check = check_file(
-            reference,
-            reference,
-            suffix=None,
-            type_name="executable file",
-            access_check=os.R_OK | os.X_OK,
-            access_name="executable",
-        )
-        if criteria_check is not True:
-            return criteria_check
+        exec_criteria = self.check_exec(reference)
+        if exec_criteria is not None:
+            return exec_criteria
 
         runnable = Runnable("tap", reference, assets=get_file_assets(reference))
         return ReferenceResolution(
@@ -196,3 +198,102 @@ def resolve(self, reference):
             return criteria_check
 
         return self._validate_and_load_runnables(reference)
+
+
+class ExecRunnablesRecipeInit(Init):
+    name = "exec-runnables-recipe"
+    description = 'Configuration for the "exec-runnables-recipe" resolver plugin'
+
+    def initialize(self):
+        help_msg = (
+            'Whether resolvers (such as "exec-runnables-recipe") should '
+            "execute files given as test references that have executable "
+            "permissions. This is disabled by default due to security "
+            "implications of running executables that may not be trusted."
+        )
+        settings.register_option(
+            section="resolver",
+            key="run_executables",
+            key_type=bool,
+            default=False,
+            help_msg=help_msg,
+        )
+
+        help_msg = (
+            "Command line options (space separated) that will be added "
+            "to the executable when executing it as a producer of "
+            "runnables-recipe JSON content."
+        )
+        settings.register_option(
+            section="resolver.exec_runnables_recipe",
+            key="arguments",
+            key_type=str,
+            default="",
+            help_msg=help_msg,
+        )
+
+
+class ExecRunnablesRecipeResolver(BaseExec, Resolver):
+    name = "exec-runnables-recipe"
+    description = "Test resolver for executables that output JSON runnable recipes"
+    priority = PluginPriority.LOW
+
+    def resolve(self, reference):
+        if not self.config.get("resolver.run_executables"):
+            return ReferenceResolution(
+                reference,
+                ReferenceResolutionResult.NOTFOUND,
+                info=(
+                    "Running executables is not enabled. Refer to the "
+                    '"resolver.run_executables" configuration option'
+                ),
+            )
+
+        exec_criteria = self.check_exec(reference)
+        if exec_criteria is not None:
+            return exec_criteria
+
+        args = self.config.get("resolver.exec_runnables_recipe.arguments")
+        if args:
+            cmd = [reference] + shlex.split(args)
+        else:
+            cmd = reference
+        try:
+            process = subprocess.Popen(
+                cmd,
+                stdin=subprocess.DEVNULL,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+            )
+        except (FileNotFoundError, PermissionError) as exc:
+            return ReferenceResolution(
+                reference,
+                ReferenceResolutionResult.NOTFOUND,
+                info=f'Failure while running executable "{reference}": {exc}',
+            )
+
+        content, _ = process.communicate()
+        try:
+            runnables = json.loads(content)
+        except json.JSONDecodeError:
+            return ReferenceResolution(
+                reference,
+                ReferenceResolutionResult.NOTFOUND,
+                info=f'Content generated by running executable "{reference}" is not JSON',
+            )
+
+        if not (
+            isinstance(runnables, list)
+            and all(isinstance(r, dict) for r in runnables)
+        ):
+            return ReferenceResolution(
+                reference,
+                ReferenceResolutionResult.NOTFOUND,
+                info=f'Content generated by running executable "{reference}" does not look like a runnables recipe',
+            )
+
+        return ReferenceResolution(
+            reference,
+            ReferenceResolutionResult.SUCCESS,
+            [Runnable.from_dict(r) for r in runnables],
+        )
diff --git a/avocado/plugins/run.py b/avocado/plugins/run.py
index e66f9a30e8..85ff9aaf42 100644
--- a/avocado/plugins/run.py
+++ b/avocado/plugins/run.py
@@ -267,6 +267,21 @@ def configure(self, parser):
             long_arg="--log-test-data-directories",
         )
 
+        settings.add_argparser_to_option(
+            namespace="resolver.run_executables",
+            parser=parser,
+            long_arg="--resolver-run-executables",
+            allow_multiple=True,
+        )
+
+        settings.add_argparser_to_option(
+            namespace="resolver.exec_runnables_recipe.arguments",
+            metavar="ARGS",
+            parser=parser,
+            long_arg="--resolver-exec-arguments",
+            allow_multiple=True,
+        )
+
         parser_common_args.add_tag_filter_args(parser)
 
     def run(self, config):
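Note: the core of ``ExecRunnablesRecipeResolver.resolve()`` above is an
execute-parse-validate pipeline. A minimal standalone sketch of that same
logic, outside the plugin machinery (the ``load_recipes`` name and the
``__main__`` usage are illustrative only, not part of this patch)::

    import json
    import shlex
    import subprocess

    def load_recipes(executable, arguments=""):
        """Run an executable and parse its stdout as a list of recipe dicts."""
        # Mirror the resolver: optional arguments are shlex-split and
        # appended; stdin is closed; stdout/stderr are captured.
        if arguments:
            cmd = [executable] + shlex.split(arguments)
        else:
            cmd = executable
        process = subprocess.Popen(
            cmd,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        content, _ = process.communicate()
        recipes = json.loads(content)  # raises json.JSONDecodeError on non-JSON
        # Reject anything that is not a JSON list of objects
        if not (isinstance(recipes, list) and all(isinstance(r, dict) for r in recipes)):
            raise ValueError("output is not a list of recipe dictionaries")
        return recipes

    if __name__ == "__main__":
        print(load_recipes("examples/nrunner/resolvers/exec_runnables_recipe.sh"))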
diff --git a/docs/source/guides/writer/chapters/recipes.rst b/docs/source/guides/writer/chapters/recipes.rst
index 51f78be9b2..01855acf6a 100644
--- a/docs/source/guides/writer/chapters/recipes.rst
+++ b/docs/source/guides/writer/chapters/recipes.rst
@@ -76,3 +76,46 @@ That will be parsed by the ``runnables-recipe`` resolver, like in
 
    exec-test /bin/true
    exec-test /bin/false
+
+Using dynamically generated recipes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``exec-runnables-recipe`` resolver allows a user to point to a
+file that will be executed and that is expected to generate (on its
+``STDOUT``) content compatible with the runnable recipe format
+described previously.
+
+.. note:: For security reasons, Avocado won't execute files
+          indiscriminately when looking for tests (at the resolution
+          phase). One must set the ``--resolver-run-executables``
+          command line option (or the underlying
+          ``resolver.run_executables`` configuration option) to allow
+          running executables at the resolution stage.
+
+A script such as:
+
+.. literalinclude:: ../../../../../examples/nrunner/resolvers/exec_runnables_recipe.sh
+
+will output JSON that is compatible with the runnable recipe format.
+That can be used directly via either ``avocado list`` or ``avocado
+run``. Example::
+
+  $ avocado list --resolver-run-executables examples/nrunner/resolvers/exec_runnables_recipe.sh
+
+  exec-test true-test
+  exec-test false-test
+
+If the executable to be run needs arguments, you can pass them via
+the ``--resolver-exec-arguments`` command line option (or the
+underlying ``resolver.exec_runnables_recipe.arguments`` option). The
+following script receives an optional parameter that can change the
+kind of the tests it generates:
+
+.. literalinclude:: ../../../../../examples/nrunner/resolvers/exec_runnables_recipe_kind.sh
+
+In order to have those tests resolved as ``tap`` tests, one can run::
+
+  $ avocado list --resolver-run-executables --resolver-exec-arguments tap examples/nrunner/resolvers/exec_runnables_recipe_kind.sh
+
+  tap true-test
+  tap false-test
diff --git a/examples/nrunner/resolvers/exec_runnables_recipe.sh b/examples/nrunner/resolvers/exec_runnables_recipe.sh
new file mode 100755
index 0000000000..258ea91229
--- /dev/null
+++ b/examples/nrunner/resolvers/exec_runnables_recipe.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo '[{"kind": "exec-test","uri": "/bin/true","identifier": "true-test"},{"kind": "exec-test","uri": "/bin/false","identifier": "false-test"}]'
diff --git a/examples/nrunner/resolvers/exec_runnables_recipe_kind.sh b/examples/nrunner/resolvers/exec_runnables_recipe_kind.sh
new file mode 100755
index 0000000000..29cc83fc86
--- /dev/null
+++ b/examples/nrunner/resolvers/exec_runnables_recipe_kind.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+kind=${1:-exec-test}
+echo "[{\"kind\": \"$kind\",\"uri\": \"/bin/true\",\"identifier\": \"true-test\"},{\"kind\": \"$kind\",\"uri\": \"/bin/false\",\"identifier\": \"false-test\"}]"
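The recipe generator does not have to be a shell script: any executable that
prints runnable recipe JSON on its stdout will do. For illustration only (this
file is not part of the patch), a hypothetical Python equivalent of
``exec_runnables_recipe_kind.sh``::

    #!/usr/bin/env python3
    # Print a runnables-recipe JSON list; the test kind comes from an
    # optional first command line argument (default: "exec-test").
    import json
    import sys

    kind = sys.argv[1] if len(sys.argv) > 1 else "exec-test"
    recipes = [
        {"kind": kind, "uri": "/bin/true", "identifier": "true-test"},
        {"kind": kind, "uri": "/bin/false", "identifier": "false-test"},
    ]
    print(json.dumps(recipes))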
diff --git a/selftests/.data/whiteboard.py b/selftests/.data/whiteboard.py
deleted file mode 100755
index cb78689976..0000000000
--- a/selftests/.data/whiteboard.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import base64
-
-from avocado import Test
-
-
-class WhiteBoard(Test):
-    """
-    Simple test that saves test custom data to the test whiteboard
-
-    :param whiteboard_data_file: File to be used as source for whiteboard data
-    :param whiteboard_data_size: Size of the generated data of the whiteboard
-    :param whiteboard_data_text: Text used when no file supplied
-    :param whiteboard_writes: How many times to copy the data into whiteboard
-    """
-
-    def test(self):
-        data_file = self.params.get("whiteboard_data_file", default="")
-        data_size = self.params.get("whiteboard_data_size", default="10")
-        if data_file:
-            self.log.info("Writing data to whiteboard from file: %s", data_file)
-            with open(data_file, "r", encoding="utf-8") as whiteboard_file:
-                size = int(data_size)
-                data = whiteboard_file.read(size)
-        else:
-            offset = int(data_size) - 1
-            data = self.params.get(
-                "whiteboard_data_text", default="default whiteboard text"
-            )[0:offset]
-
-        iterations = int(self.params.get("whiteboard_writes", default=1))
-
-        result = ""
-        for _ in range(0, iterations):
-            result += data
-        self.whiteboard = base64.encodebytes(result.encode()).decode("ascii")
diff --git a/selftests/check.py b/selftests/check.py
index b471b1bba8..02fbdc4354 100755
--- a/selftests/check.py
+++ b/selftests/check.py
@@ -29,7 +29,7 @@
     "nrunner-requirement": 28,
     "unit": 678,
     "jobs": 11,
-    "functional-parallel": 309,
+    "functional-parallel": 312,
     "functional-serial": 7,
     "optional-plugins": 0,
     "optional-plugins-golang": 2,
diff --git a/selftests/functional/resolver.py b/selftests/functional/resolver.py
index f9b64449db..d7c2145293 100644
--- a/selftests/functional/resolver.py
+++ b/selftests/functional/resolver.py
@@ -10,7 +10,13 @@
 # is also the same
 from selftests.functional.list import AVOCADO_TEST_OK as AVOCADO_INSTRUMENTED_TEST
 from selftests.functional.list import EXEC_TEST
-from selftests.utils import AVOCADO, BASEDIR, TestCaseTmpDir, python_module_available
+from selftests.utils import (
+    AVOCADO,
+    BASEDIR,
+    TestCaseTmpDir,
+    python_module_available,
+    skipUnlessPathExists,
+)
 
 
 class ResolverFunctional(unittest.TestCase):
@@ -157,6 +163,65 @@ def test_runnable_recipe_origin(self):
             result.stdout,
         )
 
+    @skipUnlessPathExists("/bin/sh")
+    def test_exec_runnable_recipe_disabled(self):
+        resolver_path = os.path.join(
+            BASEDIR,
+            "examples",
+            "nrunner",
+            "resolvers",
+            "exec_runnables_recipe.sh",
+        )
+        cmd_line = f"{AVOCADO} -V list {resolver_path}"
+        result = process.run(cmd_line)
+        self.assertIn(
+            b"examples/nrunner/resolvers/exec_runnables_recipe.sh exec-test",
+            result.stdout,
+        )
+        self.assertIn(b"exec-test: 1\n", result.stdout)
+
+    @skipUnlessPathExists("/bin/sh")
+    def test_exec_runnable_recipe_enabled(self):
+        resolver_path = os.path.join(
+            BASEDIR,
+            "examples",
+            "nrunner",
+            "resolvers",
+            "exec_runnables_recipe.sh",
+        )
+        cmd_line = f"{AVOCADO} -V list --resolver-run-executables {resolver_path}"
+        result = process.run(cmd_line)
+        self.assertIn(
+            b"exec-test true-test /bin/true exec-runnables-recipe",
+            result.stdout,
+        )
+        self.assertIn(
+            b"exec-test false-test /bin/false exec-runnables-recipe",
+            result.stdout,
+        )
+        self.assertIn(b"exec-test: 2\n", result.stdout)
+
+    @skipUnlessPathExists("/bin/sh")
+    def test_exec_runnable_recipe_args(self):
+        resolver_path = os.path.join(
+            BASEDIR,
+            "examples",
+            "nrunner",
+            "resolvers",
+            "exec_runnables_recipe_kind.sh",
+        )
+        cmd_line = f"{AVOCADO} -V list --resolver-run-executables --resolver-exec-arguments tap {resolver_path}"
+        result = process.run(cmd_line)
+        self.assertIn(
+            b"tap true-test /bin/true exec-runnables-recipe",
+            result.stdout,
+        )
+        self.assertIn(
+            b"tap false-test /bin/false exec-runnables-recipe",
+            result.stdout,
+        )
+        self.assertIn(b"tap: 2\n", result.stdout)
+
 
 class ResolverFunctionalTmp(TestCaseTmpDir):
     def test_runnables_recipe(self):
diff --git a/setup.py b/setup.py
index 32bcae611e..5a915fc095 100755
--- a/setup.py
+++ b/setup.py
@@ -394,6 +394,7 @@ def run(self):
                 "nrunner = avocado.plugins.runner_nrunner:RunnerInit",
                 "testlogsui = avocado.plugins.testlogs:TestLogsUIInit",
                 "human = avocado.plugins.human:HumanInit",
+                "exec-runnables-recipe = avocado.plugins.resolvers:ExecRunnablesRecipeInit",
             ],
             "avocado.plugins.cli": [
                 "xunit = avocado.plugins.xunit:XUnitCLI",
@@ -461,6 +462,7 @@ def run(self):
                 "tap = avocado.plugins.resolvers:TapResolver",
                 "runnable-recipe = avocado.plugins.resolvers:RunnableRecipeResolver",
                 "runnables-recipe = avocado.plugins.resolvers:RunnablesRecipeResolver",
+                "exec-runnables-recipe = avocado.plugins.resolvers:ExecRunnablesRecipeResolver",
             ],
             "avocado.plugins.suite.runner": [
                 "nrunner = avocado.plugins.runner_nrunner:Runner",
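Since the recipes are produced at resolution time, the practical payoff of
this feature is generating test matrices dynamically. As a closing
illustration (a hypothetical generator, not part of this patch; the default
directory path is made up), a script that emits one ``exec-test`` per
executable found in a directory::

    #!/usr/bin/env python3
    # Emit one exec-test runnable for every executable file in the
    # directory given as the first argument (default: /usr/local/tests).
    import json
    import os
    import sys

    directory = sys.argv[1] if len(sys.argv) > 1 else "/usr/local/tests"
    recipes = []
    for name in sorted(os.listdir(directory)):
        path = os.path.join(directory, name)
        if os.path.isfile(path) and os.access(path, os.X_OK):
            recipes.append({"kind": "exec-test", "uri": path, "identifier": name})
    print(json.dumps(recipes))

Saved as an executable file, it could then be consumed with
``avocado run --resolver-run-executables <script>``.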