diff --git a/perun/check/factory.py b/perun/check/factory.py index d8f0f6f14..391afc04f 100644 --- a/perun/check/factory.py +++ b/perun/check/factory.py @@ -144,6 +144,7 @@ def degradation_in_minor( :param bool quiet: if set to true then nothing will be printed :returns: list of found changes """ + log.major_info(f"Checking Version {minor_version}") selection: AbstractBaseSelection = pcs.selection() minor_version_info = pcs.vcs().get_minor_version_info(minor_version) @@ -178,13 +179,14 @@ def degradation_in_minor( @log.print_elapsed_time -@decorators.phase_function("check whole repository") def degradation_in_history(head: str) -> list[tuple[DegradationInfo, str, str]]: """Walks through the minor version starting from the given head, checking for degradation. :param str head: starting point of the checked history for degradation. :returns: tuple (degradation result, degradation location, degradation rate) """ + log.major_info("Checking Whole History") + log.minor_info("This might take a while") detected_changes = [] version_selection: AbstractBaseSelection = pcs.selection() with log.History(head) as history: @@ -269,7 +271,6 @@ def run_degradation_check( @log.print_elapsed_time -@decorators.phase_function("check two profiles") def degradation_between_files( baseline_file: Profile, target_file: Profile, @@ -286,6 +287,7 @@ def degradation_between_files( :param bool force: force profiles check despite different configurations :returns None: no return value """ + log.major_info("Checking two compatible profiles") # First check if the configurations are compatible baseline_config = profiles.to_config_tuple(baseline_file) target_config = profiles.to_config_tuple(target_file) @@ -309,7 +311,7 @@ def degradation_between_files( pcs.get_object_directory(), target_minor_version, detected_changes ) log.newline() - log.print_list_of_degradations(detected_changes, models_strategy) + log.print_list_of_degradations(detected_changes) 
log.print_short_summary_of_degradations(detected_changes) diff --git a/perun/cli.py b/perun/cli.py index 0af7795c5..95436907a 100644 --- a/perun/cli.py +++ b/perun/cli.py @@ -282,9 +282,10 @@ def init(dst: str, configure: bool, config_template: str, **kwargs: Any) -> None # Run the interactive configuration of the local perun repository (populating .yml) configure_local_perun(dst) else: - msg = "\nIn order to automatically run jobs configure the matrix at:\n" - msg += "\n" + (" " * 4) + ".perun/local.yml\n" - perun_log.quiet_info(msg) + perun_log.minor_status( + "Local instance of Perun can now be (manually) configured", + status=f"{perun_log.path_style('.perun/local.yml')}", + ) except PermissionError: perun_log.error("writing to shared config 'shared.yml' requires root permissions") except (ExternalEditorErrorException, MissingConfigSectionException): @@ -478,7 +479,7 @@ def remove( try: commands.remove_from_index(from_index_generator, minor) commands.remove_from_pending(from_jobs_generator) - except (NotPerunRepositoryException, EntryNotFoundException) as exception: + except (NotPerunRepositoryException) as exception: perun_log.error(f"could not remove profiles: {str(exception)}") diff --git a/perun/cli_groups/check_cli.py b/perun/cli_groups/check_cli.py index e658200d5..0d1821478 100644 --- a/perun/cli_groups/check_cli.py +++ b/perun/cli_groups/check_cli.py @@ -114,21 +114,20 @@ def check_group(**_: Any) -> None: str(perun_config.lookup_key_recursively("degradation.log_collect", "false")) ) if should_precollect: + log.major_info("Precollecting Profiles") collect_before_check = log.in_color("degradation.collect_before_check", "white", ["bold"]) - true = log.in_color("true", "green", ["bold"]) - log.info(f"{collect_before_check} is set to {true}. ", end="") - log.info("Missing profiles will be freshly collected with respect to the ", end="") - log.info( - "nearest job matrix (run `perun config edit` to modify the underlying job matrix)." 
- ) + log.minor_success(f"{log.highlight(collect_before_check)}", "true") + log.minor_info("Missing profiles will be now collected") + log.increase_indent() + log.minor_info(f"Run {log.cmd_style('perun config edit')} to modify the job matrix") + log.decrease_indent() if precollect_to_log: log_directory = log.in_color(pcs.get_log_directory(), "white", ["bold"]) - log.info( - f"The progress of the pre-collect phase will be stored in logs at {log_directory}." + log.minor_status( + "The progress will be stored in log", status=log.path_style(log_directory) ) else: - black_hole = log.in_color("black hole", "white", ["bold"]) - log.info(f"The progress of the pre-collect phase will be redirected to {black_hole}.") + log.minor_info(f"The progress will be redirected to {log.highlight('black hole')}") @check_group.command("head") @@ -152,7 +151,6 @@ def check_head(head_minor: str = "HEAD") -> None: By default, the ``hash`` corresponds to the `head` of the current project. """ - log.newline() check.degradation_in_minor(head_minor) @@ -174,9 +172,6 @@ def check_all(minor_head: str = "HEAD") -> None: and runs the performance check according to the set of strategies set in the configuration (see :ref:`degradation-config` or :doc:`config`). """ - log.info( - "[!] Running the degradation checks on the whole VCS history. 
This might take a while!\n" - ) check.degradation_in_history(minor_head) @@ -239,7 +234,6 @@ def check_profiles( """ assert ctx.parent is not None and f"impossible happened: {ctx} has no parent" - log.newline() check.degradation_between_files( baseline_profile, target_profile, diff --git a/perun/collect/bounds/run.py b/perun/collect/bounds/run.py index b30ee4288..c05d0e6d9 100644 --- a/perun/collect/bounds/run.py +++ b/perun/collect/bounds/run.py @@ -34,22 +34,25 @@ def before(sources: list[str], **kwargs: Any) -> tuple[CollectStatus, str, dict[ $ clang-3.5 -g -emit-llvm -c ${sources} """ + log.major_info("Compiling to LLVM", no_title=True) pwd = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bin") include_path = os.path.join(pwd, "include") clang_bin = ( _CLANG_COMPILER if shutil.which(_CLANG_COMPILER) else os.path.join(pwd, _CLANG_COMPILER) ) + log.minor_status(f"{log.highlight('clang')} found", status=log.path_style(clang_bin)) cmd = " ".join([clang_bin] + ["-I", include_path] + _CLANG_COMPILATION_PARAMS + list(sources)) - log.info(f"Compiling source codes: {','.join(sources)}") + + log.minor_status("Compiling source codes", status=f"{','.join(sources)}") my_env = os.environ.copy() my_env["LD_LIBRARY_PATH"] = pwd try: commands.run_safely_external_command(cmd, check_results=True, env=my_env, quiet=False) except SubprocessError as sub_err: - log.failed() + log.minor_fail("Compiling to LLVM") return CollectStatus.ERROR, str(sub_err), dict(kwargs) - log.done() + log.minor_success("Compiling to LLVM") return CollectStatus.OK, "status_message", dict(kwargs) @@ -61,13 +64,17 @@ def collect(sources: list[str], **kwargs: Any) -> tuple[CollectStatus, str, dict Finally, parses the output of Loopus into a profile """ + log.major_info("Running Loopus") pwd = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bin") loopus_bin = os.path.join(pwd, "loopus") source_filenames = [os.path.splitext(os.path.split(src)[1])[0] + _LLVM_EXT for src in sources] my_env = 
os.environ.copy() my_env["LD_LIBRARY_PATH"] = pwd - log.info(f"Running Loopus on compiled source codes: {' '.join(source_filenames)}") + log.minor_status(f"{log.highlight('Loopus')} found at", status=log.path_style(loopus_bin)) + log.minor_status( + "Running Loopus on compiled source codes", status=f"{' '.join(source_filenames)}" + ) before_analysis = systime.time() try: @@ -79,17 +86,18 @@ def collect(sources: list[str], **kwargs: Any) -> tuple[CollectStatus, str, dict returned_out, _ = commands.run_safely_external_command(cmd, check_results=True, env=my_env) out = returned_out.decode("utf-8") except SubprocessError as sub_err: - log.failed() + log.minor_fail("Collection of bounds") return CollectStatus.ERROR, str(sub_err), dict(kwargs) overall_time = systime.time() - before_analysis + log.minor_success("Collection of bounds") # Parse the out, but first fix the one file analysis, which has different format if len(sources) == 1: out = f"file {source_filenames[0]}\n" + out source_map = {bc: src for (bc, src) in zip(source_filenames, sources)} resources = parser.parse_output(out, source_map) + log.minor_success("Parsing collected output") - log.done() return ( CollectStatus.OK, "status message", diff --git a/perun/collect/complexity/run.py b/perun/collect/complexity/run.py index 1c33a15f7..348e652cb 100644 --- a/perun/collect/complexity/run.py +++ b/perun/collect/complexity/run.py @@ -74,6 +74,7 @@ def before(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, d string as a status message, mainly for error states dict of modified kwargs with 'cmd' value representing the executable """ + log.major_info("Preparing the instrumented executable") try: # Validate the inputs and dependencies first _validate_input(**kwargs) @@ -87,28 +88,24 @@ def before(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, d ) # Create the configuration cmake and build the configuration executable - log.cprint("Building the configuration executable...", 
"white") cmake_path = makefiles.create_config_cmake(target_dir, files) exec_path = makefiles.build_executable(cmake_path, makefiles.CMAKE_CONFIG_TARGET) - log.done() + log.minor_success("Building the configuration executable") # Extract some configuration data using the configuration executable - log.cprint("Extracting the configuration...", "white") function_sym = symbols.extract_symbols(exec_path) include_list, exclude_list, runtime_filter = symbols.filter_symbols(function_sym, rules) - log.done() + log.minor_success("Extracting the configuration") # Create the collector cmake and build the collector executable - log.cprint("Building the collector executable...", "white") cmake_path = makefiles.create_collector_cmake(target_dir, files, exclude_list) exec_path = makefiles.build_executable(cmake_path, makefiles.CMAKE_COLLECT_TARGET) - log.done() + log.minor_success("Building the collector executable") # Create the internal configuration file - log.cprint("Creating runtime config...", "white") configurator.create_runtime_config(exec_path, runtime_filter, include_list, kwargs) executable.cmd = exec_path - log.done() + log.minor_success("Creating the runtime config") return CollectStatus.OK, _COLLECTOR_STATUS_MSG[0], dict(kwargs) # The "expected" exception types @@ -119,7 +116,7 @@ def before(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, d UnicodeError, exceptions.UnexpectedPrototypeSyntaxError, ) as exception: - log.failed() + log.minor_fail("Preparing the instrumented executable") return CollectStatus.ERROR, str(exception), dict(kwargs) @@ -133,15 +130,15 @@ def collect(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, string as a status message, mainly for error states dict of unmodified kwargs """ - log.cprint("Running the collector...", "white") + log.major_info("Collecting Data") collect_dir = os.path.dirname(executable.cmd) # Run the command and evaluate the return code try: 
commands.run_safely_external_command(str(executable), cwd=collect_dir) - log.done() + log.minor_success("Collection of data") return CollectStatus.OK, _COLLECTOR_STATUS_MSG[0], dict(kwargs) except (CalledProcessError, IOError) as err: - log.failed() + log.minor_fail("Collection of data") return ( CollectStatus.ERROR, _COLLECTOR_STATUS_MSG[21] + f": {str(err)}", @@ -159,11 +156,12 @@ def after(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, di string as a status message, mainly for error states dict of modified kwargs with 'profile' value representing the resulting profile """ + log.major_info("Creating profile") # Get the trace log path - log.cprint("Starting the post-processing phase...", "white") internal_filename = kwargs.get("internal_data_filename", configurator.DEFAULT_DATA_FILENAME) data_path = os.path.join(os.path.dirname(executable.cmd), internal_filename) address_map = symbols.extract_symbol_address_map(executable.cmd) + log.minor_success("Symbol address map", "extracted") resources: list[dict[str, Any]] = [] call_stack: list[ProfileRecord] = [] @@ -183,7 +181,7 @@ def after(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, di err_msg += ( call_stack[-1].func + ", " + call_stack[-1].action if call_stack else "empty" ) - log.failed() + log.minor_fail("Parsing log") return CollectStatus.ERROR, err_msg, dict(kwargs) # Get the first and last record timestamps to determine the profiling time @@ -191,6 +189,7 @@ def after(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, di if is_first_line: is_first_line = False profile_start = int(record.timestamp) + log.minor_success("Parsing log") # Update the profile dictionary kwargs["profile"] = { @@ -199,7 +198,6 @@ def after(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, di "resources": resources, } } - log.done() return CollectStatus.OK, _COLLECTOR_STATUS_MSG[0], dict(kwargs) @@ -241,27 +239,34 @@ def _process_file_record( def 
_check_dependencies() -> None: """Validates that dependencies (cmake and make) are met""" - log.cprint("Checking dependencies...", "white") - log.newline() - log.cprint("make:", "white") - log.info("\t", end="") + log.minor_info("Checking dependencies") + log.increase_indent() + all_found = True if not shutil.which("make"): - log.no() + all_found = False + log.minor_fail("make", "not found") log.error( "Could not find 'make'. Please, install the makefile package.", recoverable=True, ) else: - log.yes() - log.cprint("cmake:", "white") - log.info("\t", end="") + log.minor_success("make", "found") if not shutil.which("cmake"): - log.no() - log.error("Could not find 'cmake'. Please, install build-essentials and cmake packages.") + all_found = False + log.minor_fail("cmake", "not found") + log.error( + "Could not find 'cmake'. Please, install `build-essentials` and `cmake` packages.", + recoverable=True, + ) + else: + log.minor_success("cmake", "found") + log.decrease_indent() + + if all_found: + log.minor_success("dependencies", "all found") else: - log.yes() - log.newline() - log.done() + log.minor_fail("dependencies", "not found") + log.error("Some dependencies were not satisfied: complexity cannot be run") def _validate_input(**kwargs: Any) -> None: diff --git a/perun/collect/memory/parsing.py b/perun/collect/memory/parsing.py index ba9c56fad..ef8c5397d 100644 --- a/perun/collect/memory/parsing.py +++ b/perun/collect/memory/parsing.py @@ -70,11 +70,13 @@ def parse_allocation_location(trace: list[dict[str, Any]]) -> dict[str, Any]: :param list trace: list representing stack call trace :returns dict: first user's call to allocation """ + result = {} for call in trace or []: source = call["source"] if source != "unreachable": - return call - return {} + result = call + break + return result def parse_resources(allocation: list[str]) -> dict[str, Any]: diff --git a/perun/collect/memory/run.py b/perun/collect/memory/run.py index 490ee091f..9b6ede5d0 100644 --- 
a/perun/collect/memory/run.py +++ b/perun/collect/memory/run.py @@ -26,29 +26,33 @@ def before(executable: Executable, **_: Any) -> tuple[CollectStatus, str, dict[s :param Executable executable: executable profiled command :returns tuple: (return code, status message, updated kwargs) """ + log.major_info("Building Instrumented Binary") pwd = os.path.dirname(os.path.abspath(__file__)) if not os.path.isfile(os.path.join(pwd, _lib_name)): - log.warn( - f"Missing compiled dynamic library 'lib{os.path.splitext(_lib_name)[0]}'. Compiling from sources: ", - end="", + log.minor_fail( + f"Dynamic library {log.path_style('lib' + os.path.splitext(_lib_name)[0])}", "not found" ) result = syscalls.init() if result: - log.failed() + log.minor_fail("Compiling from sources") error_msg = "Build of the library failed with error code: " error_msg += str(result) return CollectStatus.ERROR, error_msg, {} else: - log.done() + log.minor_success("Compiling from sources") + else: + log.minor_success( + f"Dynamic library {log.path_style('lib' + os.path.splitext(_lib_name)[0])}", "found" + ) - log.info("Checking if binary contains debugging information: ", end="") if not syscalls.check_debug_symbols(executable.cmd): - log.failed() - error_msg = "Binary does not contain debug info section.\n" - error_msg += "Please recompile your project with debug options (gcc -g | g++ -g)" + log.minor_fail("Checking if binary contains debugging info", "not found") + error_msg = "Binary does not contain debug info section. 
" + error_msg += ( + f"Please recompile your project with debug options {log.cmd_style('(gcc -g | g++ -g)')}" + ) return CollectStatus.ERROR, error_msg, {} - log.done() - log.info("Finished preprocessing step!\n") + log.minor_success("Checking if binary contains debugging info", "found") return CollectStatus.OK, "", {} @@ -59,16 +63,15 @@ def collect(executable: Executable, **_: Any) -> tuple[CollectStatus, str, dict[ :param Executable executable: executable profiled command :returns tuple: (return code, status message, updated kwargs) """ - log.info("Collecting data: ", end="") + log.major_info("Collecting Performance data") result, collector_errors = syscalls.run(executable) if result: - log.failed() + log.minor_fail("Collection of the raw data") error_msg = "Execution of binary failed with error code: " error_msg += str(result) + "\n" error_msg += collector_errors return CollectStatus.ERROR, error_msg, {} - log.done() - log.info("Finished collection of the raw data!\n") + log.minor_success("Collection of the raw data") return CollectStatus.OK, "", {} @@ -99,6 +102,7 @@ def after( excluding allocations in "f1" function, excluding allocators and unreachable records in call trace """ + log.major_info("Creating Profile") include_all = kwargs.get("all", False) exclude_funcs = kwargs.get("no_func", []) exclude_sources = kwargs.get("no_source", []) @@ -106,30 +110,26 @@ def after( try: profile = parser.parse_log(_tmp_log_filename, executable, sampling) except (IndexError, ValueError) as parse_err: - log.failed() + log.minor_fail("Parsing of log") return ( CollectStatus.ERROR, - f"Problems while parsing log file: {parse_err}", + f"Could not parse the log file due to: {parse_err}", {}, ) - log.done() + log.minor_success("Parsing of log") filters.set_global_region(profile) if not include_all: - log.info("Filtering traces: ", end="") filters.remove_allocators(profile) filters.trace_filter(profile, function=["?"], source=["unreachable"]) - log.done() + 
log.minor_success("Filtering traces") if exclude_funcs or exclude_sources: - log.info("Excluding functions and sources: ", end="") filters.allocation_filter(profile, function=exclude_funcs, source=exclude_sources) - log.done() + log.minor_success("Excluding functions") - log.info("Clearing records without assigned UID from profile: ", end="") filters.remove_uidless_records_from(profile) - log.done() - log.newline() + log.minor_success("Removing unassigned records") return CollectStatus.OK, "", {"profile": profile} diff --git a/perun/collect/time/run.py b/perun/collect/time/run.py index 6487b73cb..0c9aed3b4 100644 --- a/perun/collect/time/run.py +++ b/perun/collect/time/run.py @@ -36,18 +36,16 @@ def collect( :param dict _: dictionary with key, value options :return: """ - log.info("Executing the warmup-phase ", end="") + log.major_info("Running time collector") + log.minor_info("Warming up") for timing in range(0, warmup): command = " ".join(["time -p", str(executable)]).split(" ") commands.get_stdout_from_external_command(command).split("\n") - log.info(".", end="") + log.tick() sys.stdout.flush() log.newline() - log.info( - f"Begin timing of {executable.cmd} {common_kit.str_to_plural(repeat, 'time')}", - end="", - ) + log.minor_info(f"Timing {executable.cmd} {common_kit.str_to_plural(repeat, 'time')}") times = [] before_timing = systime.time() @@ -62,7 +60,7 @@ def collect( if len(t) == 2 and t[0] in TIME_TYPES ] ) - log.info(".", end="") + log.tick() sys.stdout.flush() log.newline() overall_time = systime.time() - before_timing diff --git a/perun/collect/trace/ebpf/ebpf.py b/perun/collect/trace/ebpf/ebpf.py index 58e8fa808..f975a85ef 100644 --- a/perun/collect/trace/ebpf/ebpf.py +++ b/perun/collect/trace/ebpf/ebpf.py @@ -239,7 +239,7 @@ def ebpf_runner(): if poll_duration * 1000 > _BPF_POLL_SLEEP * 0.25: break time.sleep(_BPF_SLEEP) - log.info(f"Lost: {BPF_CTX.lost} records") + log.write(f"Lost: {BPF_CTX.lost} records") BPF_CTX.data.close() 
temp.store_temp("ebpf:profiled_command.json", profiled_time, json_format=True) diff --git a/perun/collect/trace/run.py b/perun/collect/trace/run.py index e560684d7..b664f4919 100644 --- a/perun/collect/trace/run.py +++ b/perun/collect/trace/run.py @@ -77,7 +77,6 @@ def before(executable, **kwargs): WATCH_DOG.log_variable("before::kwargs::config", config.__dict__) WATCH_DOG.log_variable("before::kwargs::probes", kwargs["probes"].__dict__) - stdout.done("\n\n") return CollectStatus.OK, "", dict(kwargs) @@ -102,7 +101,6 @@ def collect(**kwargs): config.engine.collect(**kwargs) metrics.end_timer("collect_time") - stdout.done("\n\n") return CollectStatus.OK, "", dict(kwargs) @@ -134,7 +132,6 @@ def after(**kwargs): kwargs["profile"] = kwargs["config"].engine.transform(**kwargs) WATCH_DOG.info("Data processing finished.") - stdout.done("\n\n") return CollectStatus.OK, "", dict(kwargs) @@ -164,7 +161,6 @@ def teardown(**kwargs): metrics.end_timer("total_time") # metrics.save() - stdout.done("\n\n") return CollectStatus.OK, "", dict(kwargs) diff --git a/perun/collect/trace/watchdog.py b/perun/collect/trace/watchdog.py index 512fe3551..10dfafee4 100644 --- a/perun/collect/trace/watchdog.py +++ b/perun/collect/trace/watchdog.py @@ -138,7 +138,7 @@ def info(self, msg, always=True): """ if not self.__enabled: if always and not self.quiet: - perun_log.info(msg) + perun_log.write(msg) else: # Transform the info message to a debug message if quiet is set if self.quiet: diff --git a/perun/fuzz/evaluate/by_coverage.py b/perun/fuzz/evaluate/by_coverage.py index 3a417007b..2e18ebe2f 100644 --- a/perun/fuzz/evaluate/by_coverage.py +++ b/perun/fuzz/evaluate/by_coverage.py @@ -84,9 +84,9 @@ def baseline_testing( """ # get source files (.c, .cc, .cpp, .h) config.coverage.source_files = get_src_files(config.coverage.source_path) - log.info("Detected gcov version ", end="") - log.cprint(f"{config.coverage.gcov_version}", "white") - log.info("") + log.minor_status( + 
f"{log.highlight('gcov')} version", status=f"version {config.coverage.gcov_version}" + ) return get_initial_coverage(executable, workloads, config.hang_timeout, config) @@ -108,6 +108,8 @@ def get_initial_coverage( coverages = [] # run program with each seed + log.minor_info("Running program with seeds") + log.increase_indent() for seed in seeds: prepare_workspace(fuzzing_config.coverage.gcno_path) @@ -115,11 +117,14 @@ def get_initial_coverage( try: commands.run_safely_external_command(command, timeout=timeout) + log.minor_success(f"{log.cmd_style(command)}") except subprocess.CalledProcessError as serr: + log.minor_fail(f"{log.cmd_style(command)}") log.error("Initial testing with file " + seed.path + " caused " + str(serr)) seed.cov = get_coverage_from_dir(os.getcwd(), fuzzing_config.coverage) coverages.append(seed.cov) + log.decrease_indent() return int(statistics.median(coverages)) diff --git a/perun/fuzz/evaluate/by_perun.py b/perun/fuzz/evaluate/by_perun.py index fe687c69d..aaf28b23f 100644 --- a/perun/fuzz/evaluate/by_perun.py +++ b/perun/fuzz/evaluate/by_perun.py @@ -18,7 +18,7 @@ if TYPE_CHECKING: from perun.fuzz.structs import Mutation from perun.profile.factory import Profile - from perun.utils.structs import Executable, MinorVersion, CollectStatus + from perun.utils.structs import Executable, MinorVersion, CollectStatus, Job DEGRADATION_RATIO_THRESHOLD = 0.0 @@ -31,9 +31,11 @@ def baseline_testing( postprocessor: list[str], minor_version_list: list[MinorVersion], **kwargs: Any, -) -> Iterable[tuple[CollectStatus, Profile, str]]: +) -> Iterable[tuple[CollectStatus, Profile, Job]]: """Generates a profile for specified command with init seeds, compares each other. 
+ TODO: This might need some checking and tweaking as I believe it is quite shady + :param Executable executable: called command with arguments :param list seeds: list of workloads :param str collector: list of collectors @@ -80,7 +82,7 @@ def target_testing( collector: str, postprocessor: list[str], minor_version_list: list[MinorVersion], - base_result: Iterable[tuple[CollectStatus, Profile, str]], + base_result: Iterable[tuple[CollectStatus, Profile, Job]], **kwargs: Any, ) -> bool: """Generates a profile for specified command with fuzzed workload, compares with @@ -112,8 +114,8 @@ def target_testing( def check_for_change( - base_pg: Iterable[tuple[CollectStatus, Profile, str]], - target_pg: Iterable[tuple[CollectStatus, Profile, str]], + base_pg: Iterable[tuple[CollectStatus, Profile, Job]], + target_pg: Iterable[tuple[CollectStatus, Profile, Job]], method: str = "best-model", ) -> float: """Function that randomly choose an index from list. diff --git a/perun/fuzz/factory.py b/perun/fuzz/factory.py index c73b94789..6a13ebef2 100644 --- a/perun/fuzz/factory.py +++ b/perun/fuzz/factory.py @@ -189,8 +189,8 @@ def print_legend(rule_set: RuleSet) -> None: :param RuleSet rule_set: selected fuzzing (mutation) strategies and their stats """ - log.info("Statistics of rule set") - log.info( + log.minor_info("Statistics of rule set", end="\n\n") + log.write( tabulate.tabulate( [[i, rule_set.hits[i], method[1]] for (i, method) in enumerate(rule_set.rules)], headers=["id", "Rule Efficiency", "Description"], @@ -209,21 +209,33 @@ def print_results( :param FuzzingConfiguration fuzzing_config: configuration of the fuzzing :param RuleSet rule_set: selected fuzzing (mutation) strategies and their stats """ - log.info("Fuzzing: ", end="") - log.done("\n") - log.info(f"Fuzzing time: {fuzzing_report.end_time - fuzzing_report.start_time:.2f}s") - log.info(f"Coverage testing: {fuzzing_config.coverage_testing}") + log.major_info("Summary of Fuzzing") + log.minor_status( + "Fuzzing 
time", status=f"{fuzzing_report.end_time - fuzzing_report.start_time:.2f}s" + ) + log.minor_status("Coverage testing", status=f"{fuzzing_config.coverage_testing}") + log.increase_indent() if fuzzing_config.coverage_testing: - log.info(f"Program executions for coverage testing: {fuzzing_report.cov_execs}") - log.info(f"Program executions for performance testing: {fuzzing_report.perun_execs}") - log.info(f"Total program tests: {fuzzing_report.perun_execs + fuzzing_report.cov_execs}") - log.info(f"Maximum coverage ratio: {fuzzing_report.max_cov}") + log.minor_status( + "Program executions for coverage testing", status=f"{fuzzing_report.cov_execs}" + ) + log.minor_status( + "Program executions for performance testing", status=f"{fuzzing_report.perun_execs}" + ) + log.minor_status( + "Total program tests", + status=f"{fuzzing_report.perun_execs + fuzzing_report.cov_execs}", + ) + log.minor_status("Maximum coverage ratio", status=f"{fuzzing_report.max_cov}") else: - log.info(f"Program executions for performance testing: {fuzzing_report.perun_execs}") - log.info(f"Founded degradation mutations: {fuzzing_report.degradations}") - log.info(f"Hangs: {fuzzing_report.hangs}") - log.info(f"Faults: {fuzzing_report.faults}") - log.info(f"Worst-case mutation: {fuzzing_report.worst_case}") + log.minor_status( + "Program executions for performance testing", status=f"{fuzzing_report.perun_execs}" + ) + log.decrease_indent() + log.minor_status("Founded degradation mutations", status=f"{fuzzing_report.degradations}") + log.minor_status("Hangs", status=f"{fuzzing_report.hangs}") + log.minor_status("Faults", status=f"{fuzzing_report.faults}") + log.minor_status("Worst-case mutation", status=f"{log.path_style(fuzzing_report.worst_case)}") print_legend(rule_set) @@ -348,10 +360,9 @@ def teardown( :param RuleSet rule_set: list of fuzzing methods and their stats :param FuzzingConfiguration config: configuration of the fuzzing """ - log.info("Executing teardown of the fuzzing. 
") + log.major_info("Teardown") if not config.no_plotting: # Plot the results as time series - log.info("Plotting time series of fuzzing process...") interpret.plot_fuzz_time_series( fuzz_progress.deg_time_series, output_dirs["graphs"] + "/degradations_ts.pdf", @@ -359,6 +370,7 @@ def teardown( "time (s)", "degradations", ) + log.minor_success(f"Plotting {log.highlight('degradations')} in time graph") if config.coverage_testing: interpret.plot_fuzz_time_series( fuzz_progress.cov_time_series, @@ -367,6 +379,7 @@ def teardown( "time (s)", "executed lines ratio", ) + log.minor_success(f"Plotting {log.highlight('coverage')} in time graph") # Plot the differences between seeds and inferred mutation interpret.files_diff(fuzz_progress, output_dirs["diffs"]) # Save log files @@ -377,7 +390,6 @@ def teardown( fuzz_progress.stats.end_time = time.time() fuzz_progress.stats.worst_case = fuzz_progress.parents[-1].path print_results(fuzz_progress.stats, config, rule_set) - log.done() sys.exit(0) @@ -440,12 +452,12 @@ def perform_baseline_coverage_testing( :return: base coverage of parents """ base_cov = 0 - log.info("Performing coverage-based testing on parent seeds.") try: # Note that evaluate workloads modifies config as a side effect base_cov = evaluate_workloads_by_coverage.baseline_testing(executable, parents, config) - log.done() + log.minor_success("Coverage-based testing on parent seeds.") except TimeoutExpired: + log.minor_fail("Coverage-based testing on parent seeds.") log.error( f"Timeout ({config.hang_timeout}s) reached when testing with initial files. " f"Adjust hang timeout using option --hang-timeout, resp. -h." 
@@ -454,7 +466,6 @@ def perform_baseline_coverage_testing( @log.print_elapsed_time -@decorators.phase_function("fuzz performance") def run_fuzzing_for_command( executable: Executable, input_sample: list[str], @@ -472,6 +483,7 @@ def run_fuzzing_for_command( :param list minor_version_list: list of minor version for which we are collecting :param dict kwargs: rest of the keyword arguments """ + log.major_info("Fuzzing") # Initialization fuzz_progress = FuzzingProgress() @@ -497,19 +509,19 @@ def run_fuzzing_for_command( config.coverage_testing = False log.warn("No .gcno files were found.") - log.info("Performing perun-based testing on parent seeds.") # Init performance testing with seeds base_result_profile = evaluate_workloads_by_perun.baseline_testing( executable, parents, collector, postprocessor, minor_version_list, **kwargs ) - log.done() + log.minor_success("Perun-based testing on parent seeds") - log.info("Rating parents ", end="") + log.minor_info("Rating parents") # Rate seeds + log.increase_indent() for parent_seed in parents: rate_parent(fuzz_progress, parent_seed) - log.info(".", end="") - log.done() + log.minor_status(f"{log.path_style(parent_seed.path)}", status=f"{parent_seed.fitness}") + log.decrease_indent() save_state(fuzz_progress) fuzz_progress.stats.start_time = time.time() @@ -525,7 +537,6 @@ def signal_handler(sig: int, _: Optional[types.FrameType]) -> None: while (time.time() - fuzz_progress.stats.start_time) < config.timeout: # Gathering interesting workloads if config.coverage_testing: - log.info("Gathering interesting workloads using coverage based testing") execs = config.exec_limit while len(fuzz_progress.interesting_workloads) < config.precollect_limit and execs > 0: @@ -573,10 +584,10 @@ def signal_handler(sig: int, _: Optional[types.FrameType]) -> None: # not successful mutation or the same file as previously generated else: os.remove(mutation.path) - log.done() # adapting increase coverage ratio 
config.refine_coverage_rate(fuzz_progress.interesting_workloads) + log.minor_success("Gathering using coverage-based testing") # not coverage testing, only performance testing else: @@ -584,12 +595,13 @@ def signal_handler(sig: int, _: Optional[types.FrameType]) -> None: fuzz_progress.interesting_workloads = fuzz( current_workload, max_bytes, rule_set, config ) + log.minor_success("Gathering using Perun-based testing") - log.info("Evaluating gathered mutations ") + log.minor_info("Evaluating mutations") + log.increase_indent() for mutation in fuzz_progress.interesting_workloads: # creates copy of generator base_result_profile, base_copy = itertools.tee(base_result_profile) - log.info(".", end="") # testing with perun successful_result = False @@ -610,12 +622,13 @@ def signal_handler(sig: int, _: Optional[types.FrameType]) -> None: except Exception as exc: log.warn(f"Executing binary raised an exception: {exc}") + log.minor_status(f"{mutation.path}", status=f"{mutation.fitness}") # in case of testing with coverage, parent will not be removed but used for mutation if not successful_result and not config.coverage_testing: os.remove(mutation.path) + log.decrease_indent() # deletes interesting workloads for next run - log.done() del fuzz_progress.interesting_workloads[:] # get end time diff --git a/perun/fuzz/filesystem.py b/perun/fuzz/filesystem.py index a7cbab4a6..f740a150c 100644 --- a/perun/fuzz/filesystem.py +++ b/perun/fuzz/filesystem.py @@ -77,7 +77,7 @@ def del_temp_files( :param FuzzingProgress fuzz_progress: progress of the fuzzing :param str output_dir: path to directory, where fuzzed files are stored """ - log.info("Removing remaining mutations", end="") + log.minor_info("Removing mutations") for mutation in parents: if ( mutation not in fuzz_progress.final_results @@ -88,6 +88,5 @@ def del_temp_files( ): with exceptions.SuppressedExceptions(FileNotFoundError): os.remove(mutation.path) - log.info(".") - log.info("-") - log.done() + log.tick() + log.newline() 
diff --git a/perun/fuzz/interpret.py b/perun/fuzz/interpret.py index da3d561fa..3978b8199 100644 --- a/perun/fuzz/interpret.py +++ b/perun/fuzz/interpret.py @@ -41,10 +41,12 @@ def save_anomalies(anomalies: list[Mutation], anomaly_type: str, file_handle: Te :param File file_handle: file, where the anomalies are written """ if anomalies: + log.minor_info(f"Saving {log.highlight(anomaly_type + 's')}") file_handle.write(f"{anomaly_type.capitalize()}s:\n") for anomaly in anomalies: file_handle.write(anomaly.path + " " + str(anomaly.history) + "\n") - log.info(".") + log.tick() + log.newline() def save_time_series(file_handle: TextIO, time_series: TimeSeries) -> None: @@ -55,7 +57,6 @@ def save_time_series(file_handle: TextIO, time_series: TimeSeries) -> None: """ for x_value, y_value in zip(time_series.x_axis, time_series.y_axis): file_handle.write(str(x_value) + " " + str(y_value) + "\n") - log.info(".") def save_log_files(log_dir: str, fuzz_progress: FuzzingProgress) -> None: @@ -64,14 +65,16 @@ def save_log_files(log_dir: str, fuzz_progress: FuzzingProgress) -> None: :param str log_dir: path to the output log directory :param FuzzingProgress fuzz_progress: progress of the fuzzing """ - log.info("Saving log files") deg_data_file = open(log_dir + "/degradation_plot_data.txt", "w") cov_data_file = open(log_dir + "/coverage_plot_data.txt", "w") results_data_file = open(log_dir + "/results_data.txt", "w") save_time_series(deg_data_file, fuzz_progress.deg_time_series) + log.minor_success("Saving degradation time series") save_time_series(cov_data_file, fuzz_progress.cov_time_series) + log.minor_success("Saving coverage time series") + log.minor_info("Saving log files") for mut in fuzz_progress.parents: results_data_file.write( str(mut.fitness) @@ -85,13 +88,11 @@ def save_log_files(log_dir: str, fuzz_progress: FuzzingProgress) -> None: + str(mut.history) + "\n" ) - log.info(".") - log.done() + log.tick() + log.newline() save_anomalies(fuzz_progress.hangs, "hang", 
results_data_file) - log.done() save_anomalies(fuzz_progress.faults, "fault", results_data_file) - log.done() deg_data_file.close() cov_data_file.close() @@ -108,10 +109,12 @@ def get_time_for_value( :param list data: measured values (y-axis) :return int: time value from `time_data` according to measured value from `data` """ + result = time_data[-1] for x, y in zip(time_data, data): if y >= value: - return x - return time_data[-1] + result = x + break + return result def lazy_initialize_matplotlib() -> None: @@ -258,12 +261,15 @@ def files_diff(fuzz_progress: FuzzingProgress, diffs_dir: str) -> None: :param FuzzingProgress fuzz_progress: collection of statistics of fuzzing process :param str diffs_dir: path to the directory where diffs will be stored """ - log.info("Computing deltas") - for mutations in [ - fuzz_progress.final_results, - fuzz_progress.faults, - fuzz_progress.hangs, + log.minor_info("Computing deltas") + log.increase_indent() + for mutation_type, mutations in [ + ("Final", fuzz_progress.final_results), + ("Faults", fuzz_progress.faults), + ("Hangs", fuzz_progress.hangs), ]: + if mutations: + log.minor_info(mutation_type) for res in mutations: if res.predecessor is not None: pred = streams.safely_load_file(res.predecessor.path) @@ -291,5 +297,7 @@ def files_diff(fuzz_progress: FuzzingProgress, diffs_dir: str) -> None: open(diff_file_name, "w").writelines(diff) filesystem.move_file_to(diff_file_name, diffs_dir) - log.info(".") - log.done() + log.tick() + if mutations: + log.newline() + log.decrease_indent() diff --git a/perun/logic/commands.py b/perun/logic/commands.py index 3779bb199..0a1718d4b 100644 --- a/perun/logic/commands.py +++ b/perun/logic/commands.py @@ -61,13 +61,14 @@ def config_get(store_type: str, key: str) -> None: :param str store_type: type of the store lookup (local, shared of recursive) :param str key: list of section delimited by dot (.) 
""" + # Note, this is bare output, since, it "might" be used in scripts or CI or anything parsed config_store = pcs.global_config() if store_type in ("shared", "global") else pcs.local_config() if store_type == "recursive": value = perun_config.lookup_key_recursively(key) else: value = config_store.get(key) - perun_log.info(f"{key}: {value}") + perun_log.write(f"{key}: {value}") def config_set(store_type: str, key: str, value: Any) -> None: @@ -77,10 +78,11 @@ def config_set(store_type: str, key: str, value: Any) -> None: :param str key: list of section delimited by dot (.) :param object value: arbitrary value that will be set in the configuration """ + perun_log.major_info("Setting new key in Config") config_store = pcs.global_config() if store_type in ("shared", "global") else pcs.local_config() config_store.set(key, value) - perun_log.info(f"Value '{value}' set for key '{key}'") + perun_log.minor_info(f"Value '{value}' set for key '{key}'") def config_edit(store_type: str) -> None: @@ -109,6 +111,7 @@ def config_reset(store_type: str, config_template: str) -> None: :param str config_template: name of the template that we are resetting to :raises NotPerunRepositoryException: raised when we are outside of any perun scope """ + perun_log.major_info(f"Resetting {store_type} config") is_shared_config = store_type in ("shared", "global") if is_shared_config: shared_location = perun_config.lookup_shared_config_dir() @@ -117,9 +120,10 @@ def config_reset(store_type: str, config_template: str) -> None: vcs_url, vcs_type = pcs.get_vcs_type_and_url() vcs_config = {"vcs": {"url": vcs_url, "type": vcs_type}} perun_config.init_local_config_at(pcs.get_path(), vcs_config, config_template) - perun_log.info(f"{'global' if is_shared_config else 'local'} configuration reset") + + perun_log.minor_info(f"{'global' if is_shared_config else 'local'} configuration reset") if not is_shared_config: - perun_log.info(f" to {config_template}") + perun_log.write(f" to {config_template}") def 
init_perun_at( @@ -150,14 +154,16 @@ def init_perun_at( if not os.path.exists(os.path.join(perun_full_path, "local.yml")): perun_config.init_local_config_at(perun_full_path, vcs_config, config_template) else: - perun_log.info( - "configuration file already exists. Run ``perun config reset`` to reset" - " configuration to default state." + perun_log.minor_info(f"configuration file {perun_log.highlight('already exists')}") + perun_log.minor_info( + f"Run {perun_log.cmd_style('perun config reset')} to reset configuration to default state", ) # Perun successfully created - msg_prefix = "Reinitialized existing" if is_reinit else "Initialized empty" - perun_log.msg_to_stdout(msg_prefix + f" Perun repository in {perun_path}", 0) + msg = "Reinitialized " if is_reinit else "Initialized " + msg += perun_log.highlight("existing") if is_reinit else perun_log.highlight("empty") + msg += " Perun repository" + perun_log.minor_status(msg, status=f"{perun_log.path_style(perun_path)}") def init(dst: str, configuration_template: str = "master", **kwargs: Any) -> None: @@ -171,6 +177,7 @@ def init(dst: str, configuration_template: str = "master", **kwargs: Any) -> Non :param str configuration_template: name of the template that will be used for initialization of local configuration """ + perun_log.major_info("Initializing Perun") # First init the wrapping repository well vcs_type = kwargs["vcs_type"] vcs_path = kwargs["vcs_path"] or dst @@ -184,7 +191,10 @@ def init(dst: str, configuration_template: str = "master", **kwargs: Any) -> Non super_perun_dir = common_kit.locate_perun_dir_on(dst) is_pcs_reinitialized = super_perun_dir == dst if not is_pcs_reinitialized: - perun_log.warn(f"There exists super perun directory at {super_perun_dir}") + perun_log.warn( + f"There exists perun directory at {perun_log.path_style(super_perun_dir)}", + end="\n\n", + ) except NotPerunRepositoryException: is_pcs_reinitialized = False @@ -216,11 +226,16 @@ def add( :param bool force: if set to true, then 
the add will be forced, i.e. the check for origin will not be performed. """ + perun_log.major_info("Adding profiles") added_profile_count = 0 for profile_name in profile_names: # Test if the given profile exists (This should hold always, or not?) + reg_rel_path = os.path.relpath(profile_name) if not os.path.exists(profile_name): - perun_log.error(f"profile {profile_name} does not exists", recoverable=True) + perun_log.minor_fail(perun_log.path_style(f"{reg_rel_path}")) + perun_log.increase_indent() + perun_log.minor_info("profile does not exists") + perun_log.decrease_indent() continue # Load profile content @@ -229,9 +244,14 @@ def add( unpacked_profile = store.load_profile_from_file(profile_name, True, unsafe_load=True) if not force and unpacked_profile["origin"] != minor_version: - error_msg = f"cannot add profile '{profile_name}' to minor index of '{minor_version}':" - error_msg += f"profile originates from minor version '{unpacked_profile['origin']}'" - perun_log.error(error_msg, recoverable=True) + perun_log.minor_fail(perun_log.path_style(f"{reg_rel_path}")) + perun_log.minor_status( + "current version", status=f"{perun_log.highlight(minor_version)}" + ) + perun_log.minor_status( + "origin version", + status=f"{perun_log.highlight(unpacked_profile['origin'])}", + ) continue # Remove origin from file @@ -259,15 +279,19 @@ def add( if not keep_profile: os.remove(profile_name) + perun_log.minor_success(perun_log.path_style(f"{reg_rel_path}"), "registered") added_profile_count += 1 profile_names_len = len(profile_names) + perun_log.minor_status( + "Registration succeeded for", + status=f"{perun_log.success_highlight(common_kit.str_to_plural(added_profile_count, 'profile'))}", + ) if added_profile_count != profile_names_len: - perun_log.error( - f"could not register {common_kit.str_to_plural(added_profile_count, 'profile')}" - f" in index: {added_profile_count - profile_names_len} failed" + failed_profile_count = common_kit.str_to_plural( + profile_names_len - 
added_profile_count, "profile" ) - perun_log.info(f"successfully registered {added_profile_count} profiles in index") + perun_log.error(f"Registration failed for - {failed_profile_count}") @vcs_kit.lookup_minor_version @@ -287,24 +311,21 @@ def remove_from_pending(profile_generator: Collection[str]) -> None: :param generator profile_generator: generator of profiles that will be removed from pending jobs """ + perun_log.major_info("Removing from pending") removed_profile_number = len(profile_generator) for i, pending_file in enumerate(profile_generator): + count_status = f"{common_kit.format_counter_number(i + 1, removed_profile_number)}/{removed_profile_number}" os.remove(pending_file) - perun_log.info( - "{}/{} deleted {} from pending jobs".format( - common_kit.format_counter_number(i + 1, removed_profile_number), - removed_profile_number, - perun_log.in_color(os.path.split(pending_file)[1], "grey"), - ) - ) + perun_log.minor_success(f"{count_status} {perun_log.path_style(pending_file)}", "deleted") if removed_profile_number: - result_string = perun_log.in_color( - f"{common_kit.str_to_plural(removed_profile_number, 'profile')}", - "white", - ["bold"], + perun_log.major_info("Summary") + result_string = perun_log.success_highlight( + f"{common_kit.str_to_plural(removed_profile_number, 'profile')}" ) - perun_log.info(f"successfully removed {result_string} from pending jobs") + perun_log.minor_status("Removal succeeded for", status=result_string) + else: + perun_log.minor_info("Nothing to remove") def calculate_profile_numbers_per_type( @@ -336,16 +357,16 @@ def print_profile_numbers( :param str line_ending: ending of the print (for different outputs of log and status) """ if profile_numbers["all"]: - perun_log.info(f"{profile_numbers['all']} {profile_types} profiles (", end="") + perun_log.write(f"{profile_numbers['all']} {profile_types} profiles (", end="") first_printed = False for profile_type in SUPPORTED_PROFILE_TYPES: if not profile_numbers[profile_type]: 
continue - perun_log.info(", " if first_printed else "", end="") + perun_log.write(", " if first_printed else "", end="") first_printed = True type_colour = PROFILE_TYPE_COLOURS[profile_type] perun_log.cprint(f"{profile_numbers[profile_type]} {profile_type}", type_colour) - perun_log.info(")", end=line_ending) + perun_log.write(")", end=line_ending) else: perun_log.cprintln(f"(no {profile_types} profiles)", TEXT_WARN_COLOUR, attrs=TEXT_ATTRS) @@ -404,6 +425,7 @@ def log(minor_version: str, short: bool = False, **_: Any) -> None: minor_version_maxima = calculate_maximal_lengths_for_object_list( minor_versions, MinorVersion.valid_fields() ) + # Update manually the maxima for the printed supported profile types, each requires two # characters and 9 stands for " profiles" string @@ -543,7 +565,7 @@ def print_shortlog_profile_list( # Non-token parts of the formatting string are printed as they are else: perun_log.cprint(token, "white") - perun_log.info("") + perun_log.newline() def print_shortlog_token( @@ -602,7 +624,7 @@ def print_changes_token(max_lengths: dict[str, int], minor_version: MinorVersion perun_log.count_degradations_per_group(degradations), width=max_lengths["changes"], ) - perun_log.info(change_string, end="") + perun_log.write(change_string, end="") def print_stats_token( @@ -621,7 +643,7 @@ def print_stats_token( pcs.get_object_directory(), minor_version.checksum ) if tracked_profiles["all"]: - perun_log.info( + perun_log.write( perun_log.in_color( "{:{}}".format(tracked_profiles["all"], max_lengths["all"]), TEXT_EMPH_COLOUR, @@ -632,7 +654,7 @@ def print_stats_token( # Print the coloured numbers for profile_type in SUPPORTED_PROFILE_TYPES: - perun_log.info( + perun_log.write( "{}{}".format( perun_log.in_color(PROFILE_DELIMITER, HEADER_SLASH_COLOUR), perun_log.in_color( @@ -643,9 +665,9 @@ def print_stats_token( end="", ) - perun_log.info(perun_log.in_color(" profiles", HEADER_INFO_COLOUR, TEXT_ATTRS), end="") + perun_log.write(perun_log.in_color(" 
profiles", HEADER_INFO_COLOUR, TEXT_ATTRS), end="") else: - perun_log.info( + perun_log.write( perun_log.in_color( "--no--profiles--".center(stat_length), TEXT_WARN_COLOUR, TEXT_ATTRS ), @@ -680,7 +702,7 @@ def print_shortlog_profile_list_header( else: # Print the rest (non-token stuff) perun_log.cprint(token, "white", HEADER_ATTRS) - perun_log.info("") + perun_log.newline() def print_shortlog_stats_header(max_lengths: dict[str, int]) -> None: @@ -692,7 +714,7 @@ def print_shortlog_stats_header(max_lengths: dict[str, int]) -> None: """ slash = perun_log.in_color(PROFILE_DELIMITER, HEADER_SLASH_COLOUR, HEADER_ATTRS) end_msg = perun_log.in_color(" profiles", HEADER_SLASH_COLOUR, HEADER_ATTRS) - perun_log.info( + perun_log.write( perun_log.in_color( "{0}{4}{1}{4}{2}{4}{3}{5}".format( perun_log.in_color( @@ -737,16 +759,16 @@ def print_minor_version_info(head_minor_version: MinorVersion, indent: int = 0) :param MinorVersion head_minor_version: identification of the commit (preferably sha1) :param int indent: indent of the description part """ - perun_log.info( + perun_log.write( f"Author: {head_minor_version.author} <{head_minor_version.email}> {head_minor_version.date}" ) for parent in head_minor_version.parents: - perun_log.info(f"Parent: {parent}") - perun_log.info("") + perun_log.write(f"Parent: {parent}") + perun_log.newline() indented_desc = "\n".join( map(lambda line: " " * (indent * 4) + line, head_minor_version.desc.split("\n")) ) - perun_log.info(indented_desc) + perun_log.write(indented_desc) def print_other_formatting_string( @@ -910,12 +932,12 @@ def print_status_profiles( :param list profiles: list of profiles """ for profile_no, profile_info in enumerate(profiles): - perun_log.info(" ", end="") + perun_log.write(" ", end="") perun_log.cprint( f"{profile_no}@{list_config.id_char}".rjust(list_config.id_width + 2, " "), list_config.colour, ) - perun_log.info(" ", end="") + perun_log.write(" ", end="") for token_type, token in fmt_tokens: if token_type == 
"fmt_string": if m := FMT_REGEX.match(token): @@ -933,7 +955,7 @@ def print_status_profiles( perun_log.error(f"incorrect formatting token {token}") else: perun_log.cprint(token, list_config.colour) - perun_log.info("") + perun_log.newline() if profile_no % 5 == 0 or profile_no == list_config.list_len - 1: perun_log.cprintln("\u2550" * list_config.header_width + "\u25A3", list_config.colour) @@ -956,9 +978,9 @@ def print_status_profile_list_header( :param dict max_lengths: mapping of token types ot their maximal lengths for alignment """ perun_log.cprintln("\u2550" * list_config.header_width + "\u25A3", list_config.colour) - perun_log.info(" ", end="") + perun_log.write(" ", end="") perun_log.cprint("id".center(list_config.id_width + 2, " "), list_config.colour) - perun_log.info(" ", end="") + perun_log.write(" ", end="") for token_type, token in fmt_tokens: if token_type == "fmt_string": if m := FMT_REGEX.match(token): @@ -973,7 +995,7 @@ def print_status_profile_list_header( else: # Print the rest (non token stuff) perun_log.cprint(token, list_config.colour) - perun_log.info("") + perun_log.newline() perun_log.cprintln("\u2550" * list_config.header_width + "\u25A3", list_config.colour) @@ -1088,18 +1110,18 @@ def status(short: bool = False, **_: Any) -> None: minor_head = pcs.vcs().get_minor_head() # Print the status of major head. 
- perun_log.info( + perun_log.write( f"On major version {perun_log.in_color(major_head, TEXT_EMPH_COLOUR, TEXT_ATTRS)} ", end="" ) # Print the index of the current head - perun_log.info( + perun_log.write( f"(minor version: {perun_log.in_color(minor_head, TEXT_EMPH_COLOUR, TEXT_ATTRS)})" ) # Print in long format, the additional information about head commit, by default print if not short: - perun_log.info("") + perun_log.newline() minor_version = pcs.vcs().get_minor_version_info(minor_head) print_minor_version_info(minor_version) @@ -1112,16 +1134,16 @@ def status(short: bool = False, **_: Any) -> None: ) print_status_profile_list(minor_version_profiles, maxs, short) if not short: - perun_log.info("") + perun_log.newline() print_status_profile_list(untracked_profiles, maxs, short, "untracked") # Print degradation info degradation_list = store.load_degradation_list_for(pcs.get_object_directory(), minor_head) if not short: - perun_log.info("") + perun_log.newline() perun_log.print_short_summary_of_degradations(degradation_list) if not short: - perun_log.info("") + perun_log.newline() perun_log.print_list_of_degradations(degradation_list) @@ -1187,7 +1209,7 @@ def print_temp_files(root: str, **kwargs: Any) -> None: ] # If there are no files then abort the output if not tmp_files: - perun_log.info("== No results for the given parameters in the .perun/tmp/ directory ==") + perun_log.write("== No results for the given parameters in the .perun/tmp/ directory ==") return # First sort by the name @@ -1221,19 +1243,19 @@ def print_formatted_temp_files( for file_name, protection, size in records: # Print the size of each file if show_size: - perun_log.info( + perun_log.write( f"{perun_log.in_color(perun_log.format_file_size(size), TEXT_EMPH_COLOUR)}", end=perun_log.in_color(" | ", TEXT_WARN_COLOUR), ) # Print the protection level of each file if show_protection: if protection == temp.UNPROTECTED: - perun_log.info( + perun_log.write( f"{temp.UNPROTECTED}", 
end=perun_log.in_color(" | ", TEXT_WARN_COLOUR), ) else: - perun_log.info( + perun_log.write( f"{perun_log.in_color(temp.PROTECTED, TEXT_WARN_COLOUR)} ", end=perun_log.in_color(" | ", TEXT_WARN_COLOUR), ) @@ -1243,8 +1265,8 @@ def print_formatted_temp_files( file_dir = os.path.dirname(file_name) if file_dir: file_dir += os.sep - perun_log.info(f"{perun_log.in_color(file_dir, TEXT_EMPH_COLOUR)}", end="") - perun_log.info(f"{os.path.basename(file_name)}") + perun_log.write(f"{perun_log.in_color(file_dir, TEXT_EMPH_COLOUR)}", end="") + perun_log.write(f"{os.path.basename(file_name)}") def delete_temps(path: str, ignore_protected: bool, force: bool, **kwargs: Any) -> None: @@ -1293,7 +1315,7 @@ def list_stat_objects(mode: str, **kwargs: Any) -> None: # Abort the whole output if we have no versions if not versions: - perun_log.info("== No results for the given parameters in the .perun/stats/ directory ==") + perun_log.write("== No results for the given parameters in the .perun/stats/ directory ==") return results: list[tuple[Optional[float], str, str | int]] = [] @@ -1358,7 +1380,7 @@ def _print_total_size(total_size: int, enabled: bool) -> None: """ if enabled: formated_total_size = perun_log.format_file_size(total_size) - perun_log.info( + perun_log.write( f"Total size of all the displayed files / directories: " f"{perun_log.in_color(formated_total_size, TEXT_EMPH_COLOUR)}" ) @@ -1389,7 +1411,7 @@ def _print_stat_objects( if record: record += perun_log.in_color(" | ", TEXT_WARN_COLOUR) record += perun_log.in_color(str(prop), TEXT_EMPH_COLOUR) if colored else str(prop) - perun_log.info(record) + perun_log.write(record) def delete_stats_file(name: str, in_minor: str, keep_directory: bool) -> None: diff --git a/perun/logic/config_templates.py b/perun/logic/config_templates.py index c823babc2..abedb7727 100644 --- a/perun/logic/config_templates.py +++ b/perun/logic/config_templates.py @@ -352,7 +352,7 @@ def _locate_workload_candidates() -> list[str]: :return: list of 
candidate workloads """ - log.info("Looking up candidate workloads") + log.minor_info("Looking for candidate workloads") workload_candidates = [] for file in UserConfiguration._all_candidate_files(UserConfiguration.WORKLOAD_FOLDERS): workload_candidates.append(file) @@ -368,12 +368,15 @@ def _locate_executable_candidates() -> list[str]: :return: list of candidate executables """ # Execute make before, in case there is nothing to make, then beat it - log.info("Looking up candidate executables") - log.info("Try to compile binaries for the project by running make") + log.minor_info("Looking for candidate executables") + log.increase_indent() try: commands.run_safely_list_of_commands(["make"]) + log.minor_success(f"{log.cmd_style('make')}") except subprocess.CalledProcessError: - log.info("Nothing to make...") + log.minor_fail(f"{log.cmd_style('make')}") + log.minor_info("Nothing to make (probably)") + log.decrease_indent() executable_candidates = [] for file in UserConfiguration._all_candidate_files(UserConfiguration.EXECUTABLE_FOLDERS): if os.path.isfile(file) and os.access(file, os.X_OK): diff --git a/perun/logic/index.py b/perun/logic/index.py index 99724a300..c1d04ffd3 100644 --- a/perun/logic/index.py +++ b/perun/logic/index.py @@ -347,12 +347,12 @@ def print_index_from_handle(index_handle: BinaryIO) -> None: index_version = store.read_int_from_handle(index_handle) number_of_entries = store.read_int_from_handle(index_handle) - perun_log.info( + perun_log.write( f"{index_prefix.decode('utf-8')}, index version {index_version} with {number_of_entries} entries\n" ) for entry in walk_index(index_handle): - perun_log.info(str(entry)) + perun_log.write(str(entry)) def touch_index(index_path: str) -> None: @@ -598,9 +598,6 @@ def register_in_index( ) write_entry_to_index(index_filename, entry) - reg_rel_path = os.path.relpath(registered_file) - perun_log.info(f"'{reg_rel_path}' successfully registered in minor version index") - def remove_from_index( base_dir: str, 
minor_version: str, removed_file_generator: Collection[str] @@ -618,6 +615,7 @@ def remove_from_index( # Nothing to remove removed_profile_number = len(removed_file_generator) if removed_profile_number == 0: + perun_log.minor_info("Nothing to remove") return # Get directory and index @@ -626,6 +624,7 @@ def remove_from_index( if not os.path.exists(minor_version_index): raise EntryNotFoundException("", "empty index") + perun_log.major_info("Removing from index") # Lookup all entries for the given function with open(minor_version_index, "rb+") as index_handle: # Gather all entries from the index @@ -642,16 +641,18 @@ def lookup_function(looked_entry: BasicIndexEntry) -> bool: else: return looked_entry.path == removed_file - found_entry = lookup_entry_within_index(index_handle, lookup_function, removed_file) - removed_entries.append(found_entry) - - perun_log.info( - "{}/{} deregistered {} from index".format( - common_kit.format_counter_number(i + 1, removed_profile_number), - removed_profile_number, - perun_log.in_color(found_entry.path, "grey"), + count_status = f"{common_kit.format_counter_number(i + 1, removed_profile_number)}/{removed_profile_number}" + try: + found_entry = lookup_entry_within_index(index_handle, lookup_function, removed_file) + removed_entries.append(found_entry) + perun_log.minor_success( + f"{count_status} {perun_log.path_style(found_entry.path)}", "deregistered" ) - ) + except EntryNotFoundException: + perun_log.minor_fail( + f"{count_status} {perun_log.path_style(removed_file)}", "not found" + ) + removed_profile_number -= 1 # Update number of entries index_handle.seek(INDEX_NUMBER_OF_ENTRIES_OFFSET) @@ -664,17 +665,15 @@ def lookup_function(looked_entry: BasicIndexEntry) -> bool: entry.write_to(index_handle) index_handle.truncate() + + perun_log.major_info("Summary") if removed_profile_number: - result_string = perun_log.in_color( - f"{common_kit.str_to_plural(removed_profile_number, 'profile')}", - "white", - ["bold"], - ) - 
perun_log.info( - "successfully deregistered {} from {} index".format( - result_string, perun_log.in_color(minor_version, "green") - ) + result_string = perun_log.success_highlight( + f"{common_kit.str_to_plural(removed_profile_number, 'profile')}" ) + perun_log.minor_status("Removal succeeded for", status=result_string) + else: + perun_log.minor_info("Nothing to remove") def get_profile_list_for_minor(base_dir: str, minor_version: str) -> list[BasicIndexEntry]: diff --git a/perun/logic/runner.py b/perun/logic/runner.py index d949c004c..b9755f843 100644 --- a/perun/logic/runner.py +++ b/perun/logic/runner.py @@ -6,6 +6,7 @@ import distutils.util as dutils import os import signal +import time import subprocess # Third-Party Imports @@ -321,7 +322,6 @@ def run_all_phases_for( @log.print_elapsed_time -@decorators.phase_function("collect") def run_collector(collector: Unit, job: Job) -> tuple[CollectStatus, dict[str, Any]]: """Run the job of collector of the given name. @@ -332,7 +332,8 @@ def run_collector(collector: Unit, job: Job) -> tuple[CollectStatus, dict[str, A :param Job job: additional information about the running job :returns (int, dict): status of the collection, generated profile """ - log.print_current_phase("Collecting data by {}", collector.name, COLLECT_PHASE_COLLECT) + log.print_current_phase("Collecting by collector", collector.name, COLLECT_PHASE_COLLECT) + log.increase_indent() try: collector_module = common_kit.get_module(f"perun.collect.{collector.name}.run") @@ -345,14 +346,19 @@ def run_collector(collector: Unit, job: Job) -> tuple[CollectStatus, dict[str, A collection_report, prof = run_all_phases_for(collector_module, "collector", job_params) if not collection_report.is_ok(): + log.minor_fail(f"Collecting from {log.cmd_style(job.executable.cmd)}") log.error( f"while collecting by {collector.name}: {collection_report.message}", recoverable=True, raised_exception=collection_report.exception, ) else: - log.info(f"Successfully collected data 
from {job.executable.cmd}") + log.minor_success( + f"Collecting by {log.highlight(collector.name)} from {log.cmd_style(str(job.executable))}", + ) + log.newline() + log.decrease_indent() return cast(CollectStatus, collection_report.status), prof @@ -383,7 +389,6 @@ def run_collector_from_cli_context( @log.print_elapsed_time -@decorators.phase_function("postprocess") def run_postprocessor( postprocessor: Unit, job: Job, prof: dict[str, Any] ) -> tuple[PostprocessStatus, dict[str, Any]]: @@ -398,9 +403,8 @@ def run_postprocessor( :param dict prof: dictionary with profile :returns (int, dict): status of the collection, postprocessed profile """ - log.print_current_phase( - "Postprocessing data with {}", postprocessor.name, COLLECT_PHASE_POSTPROCESS - ) + log.print_current_phase("Postprocessing data", postprocessor.name, COLLECT_PHASE_POSTPROCESS) + log.increase_indent() try: postprocessor_module = common_kit.get_module(f"perun.postprocess.{postprocessor.name}.run") @@ -418,13 +422,15 @@ def run_postprocessor( postprocess_report, prof = run_all_phases_for(postprocessor_module, "postprocessor", job_params) if not postprocess_report.is_ok() or not prof: + log.minor_fail(f"Postprocessing by {postprocessor.name}") log.error( f"while postprocessing by {postprocessor.name}: {postprocess_report.message}", recoverable=True, ) else: - log.info(f"Successfully postprocessed data by {postprocessor.name}") + log.minor_success(f"Postprocessing by {postprocessor.name}") + log.decrease_indent() return cast(PostprocessStatus, postprocess_report.status), prof @@ -441,7 +447,9 @@ def store_generated_profile(prof: Profile, job: Job, profile_name: Optional[str] full_profile_path = os.path.join(profile_directory, full_profile_name) streams.store_json(full_profile.serialize(), full_profile_path) # FIXME: there is an inconsistency in dict/Profile types, needs to be investigated more thoroughly - log.info(f"stored profile at: {os.path.relpath(full_profile_path)}") + log.minor_status( + "stored 
generated profile ", status=f"{log.path_style(os.path.relpath(full_profile_path))}" + ) if dutils.strtobool(str(config.lookup_key_recursively("profiles.register_after_run", "false"))): # We either store the profile according to the origin, or we use the current head dst = prof.get("origin", pcs.vcs().get_minor_head()) @@ -481,9 +489,7 @@ def run_postprocessor_on_profile( return p_status, processed_profile -@log.print_elapsed_time -@decorators.phase_function("prerun") -def run_prephase_commands(phase: str, phase_colour: ColorChoiceType = "white") -> None: +def run_prephase_commands(phase: str) -> None: """Runs the phase before the actual collection of the methods This command first retrieves the phase from the configuration, and runs @@ -493,15 +499,16 @@ def run_prephase_commands(phase: str, phase_colour: ColorChoiceType = "white") - :cunit:`execute`. :param str phase: name of the phase commands - :param str phase_colour: colour for the printed phase """ phase_key = ".".join(["execute", phase]) if not phase.startswith("execute") else phase cmds = pcs.local_config().safe_get(phase_key, []) if cmds: - log.cprint(f"Running '{phase}' phase", phase_colour) - log.newline() + log.major_info("Prerun") try: + before = time.time() external_commands.run_safely_list_of_commands(cmds) + elapsed = time.time() - before + log.minor_status("Elapsed time", status=f"{elapsed:0.2f}s") except subprocess.CalledProcessError as exception: error_command = str(exception.cmd) error_code = exception.returncode @@ -512,8 +519,6 @@ def run_prephase_commands(phase: str, phase_colour: ColorChoiceType = "white") - ) -@log.print_elapsed_time -@decorators.phase_function("batch job run") def generate_jobs_on_current_working_dir( job_matrix: dict[str, dict[str, list[Job]]], number_of_jobs: int ) -> Iterable[tuple[CollectStatus, Profile, Job]]: @@ -530,11 +535,12 @@ def generate_jobs_on_current_working_dir( log.print_job_progress.current_job = 1 collective_status = CollectStatus.OK - log.newline() + + 
log.major_info("Running Jobs") for job_cmd, workloads_per_cmd in job_matrix.items(): - log.print_current_phase("Collecting profiles for {}", job_cmd, COLLECT_PHASE_CMD) for workload, jobs_per_workload in workloads_per_cmd.items(): - log.print_current_phase(" = processing generator {}", workload, COLLECT_PHASE_WORKLOAD) + log.print_current_phase("Collecting for command", job_cmd, COLLECT_PHASE_CMD) + log.print_current_phase("Generating by workload", workload, COLLECT_PHASE_WORKLOAD) # Prepare the specification generator_spec = workload_generators_specs.get( workload, GeneratorSpec(SingletonGenerator, {"value": workload}) @@ -564,8 +570,6 @@ def generate_jobs_on_current_working_dir( yield collective_status, prof, job -@log.print_elapsed_time -@decorators.phase_function("overall profiling") def generate_jobs( minor_version_list: list[MinorVersion], job_matrix: dict[str, dict[str, list[Job]]], @@ -579,12 +583,10 @@ def generate_jobs( with vcs_kit.CleanState(): for minor_version in minor_version_list: pcs.vcs().checkout(minor_version.checksum) - run_prephase_commands("pre_run", COLLECT_PHASE_CMD) + run_prephase_commands("pre_run") yield from generate_jobs_on_current_working_dir(job_matrix, number_of_jobs) -@log.print_elapsed_time -@decorators.phase_function("overall profiling") def generate_jobs_with_history( minor_version_list: list[MinorVersion], job_matrix: dict[str, dict[str, list[Job]]], @@ -602,7 +604,7 @@ def generate_jobs_with_history( log.newline() history.finish_minor_version(minor_version, []) pcs.vcs().checkout(minor_version.checksum) - run_prephase_commands("pre_run", COLLECT_PHASE_CMD) + run_prephase_commands("pre_run") yield from generate_jobs_on_current_working_dir(job_matrix, number_of_jobs) log.newline() history.flush(with_border=True) @@ -615,7 +617,7 @@ def generate_profiles_for( postprocessor: list[str], minor_version_list: list[MinorVersion], **kwargs: Any, -) -> Iterable[tuple[CollectStatus, Profile, str]]: +) -> Iterable[tuple[CollectStatus, 
Profile, Job]]: """Helper generator, that takes job specification and continuously generates profiles This is mainly used for fuzzing, which requires to handle the profiles without any storage, @@ -654,6 +656,7 @@ def run_single_job( :return: CollectStatus.OK if all jobs were successfully collected, CollectStatus.ERROR if any of collections or postprocessing failed """ + log.major_info("Running From Single Job") job_matrix, number_of_jobs = construct_job_matrix( cmd, workload, collector, postprocessor, **kwargs ) @@ -675,6 +678,7 @@ def run_matrix_job( :return: CollectStatus.OK if all jobs were successfully collected, CollectStatus.ERROR if any of collections or postprocessing failed """ + log.major_info("Running Matrix Job") job_matrix, number_of_jobs = construct_job_matrix(**load_job_info_from_config()) generator_function = generate_jobs_with_history if with_history else generate_jobs status = CollectStatus.OK diff --git a/perun/postprocess/regression_analysis/methods.py b/perun/postprocess/regression_analysis/methods.py index df3943c53..11b3f1fba 100644 --- a/perun/postprocess/regression_analysis/methods.py +++ b/perun/postprocess/regression_analysis/methods.py @@ -59,8 +59,9 @@ def compute( result["method"] = method analysis.append(result) except exceptions.GenericRegressionExceptionBase as exc: - log.info(f"unable to perform regression analysis on function '{chunk[2]}'.") - log.info(f" - {exc}") + log.minor_info( + f"unable to perform regression analysis on function '{chunk[2]} due to: {exc}", + ) # Compute the derived models for der in compute_derived(derived, analysis, **kwargs): analysis.append(der) diff --git a/perun/utils/common/cli_kit.py b/perun/utils/common/cli_kit.py index f4cf2bf0e..d95ffa0ad 100644 --- a/perun/utils/common/cli_kit.py +++ b/perun/utils/common/cli_kit.py @@ -45,7 +45,7 @@ def print_version(_: click.Context, __: click.Option, value: bool) -> None: """Prints current version of Perun and ends""" if value: - log.info(f"Perun 
{perun.__version__}") + log.write(f"Perun {perun.__version__}") exit(0) @@ -409,14 +409,15 @@ def lookup_profile_in_filesystem(profile_name: str) -> str: if os.path.exists(profile_name): return profile_name - log.info(f"file '{profile_name}' does not exist. Checking pending jobs...") + log.minor_fail(f"file '{profile_name}'", "does not exist") + log.minor_info("Checking pending jobs") # 2) if it does not exist check pending job_dir = pcs.get_job_directory() job_path = os.path.join(job_dir, profile_name) if os.path.exists(job_path): return job_path - log.info(f"file '{profile_name}' not found in pending jobs...") + log.minor_fail(f"file '{profile_name}'", "not found in pending jobs") # 3) if still not found, check recursively all candidates for match and ask for confirmation searched_regex = re.compile(profile_name) for root, _, files in os.walk(os.getcwd()): @@ -482,7 +483,8 @@ def lookup_any_profile_callback(_: click.Context, __: click.Argument, value: str if profile_from_index: return profile_from_index - log.info(f"file '{value}' not found in index. 
Checking filesystem...") + log.minor_fail(f"file '{value}'", "not found in index") + log.minor_info("Checking filesystem.") # 2) Else lookup filenames and load the profile abs_path = lookup_profile_in_filesystem(value) if not os.path.exists(abs_path): @@ -776,7 +778,7 @@ def split_requirement(requirement: str) -> str: with open(dump_file, "w") as dump_handle: dump_handle.write(output) - log.info(f"Saved dump to '{dump_file}'") + log.minor_status("Saved dump", status=f"{log.path_style(dump_file)}") def set_optimization(_: click.Context, param: click.Argument, value: str) -> str: diff --git a/perun/utils/common/common_kit.py b/perun/utils/common/common_kit.py index 8142a5164..112337079 100644 --- a/perun/utils/common/common_kit.py +++ b/perun/utils/common/common_kit.py @@ -151,7 +151,7 @@ def str_to_plural(count: int, verb: str) -> str: :param int count: number of the verbs :param str verb: name of the verb we are creating a plural for """ - return str(count) + " " + verb + "s" if count != 1 else verb + return str(count) + " " + (verb + "s" if count != 1 else verb) def format_counter_number(count: int, max_number: int) -> str: diff --git a/perun/utils/common/script_kit.py b/perun/utils/common/script_kit.py index 1138621c7..a19415680 100644 --- a/perun/utils/common/script_kit.py +++ b/perun/utils/common/script_kit.py @@ -43,6 +43,7 @@ def template_name_filter(template_name: str) -> bool: """ return template_name.startswith(template_type) + log.major_info("Creating Unit from Template") # Lookup the perun working dir according to the current file perun_dev_dir = os.path.abspath(os.path.join(os.path.split(__file__)[0], "..")) if ( @@ -55,6 +56,7 @@ def template_name_filter(template_name: str) -> bool: "(either not writeable or does not exist)\n\n" "Perhaps you are not working from perun dev folder?" 
) + log.minor_status("Target Perun development dir", status=log.path_style(perun_dev_dir)) # Initialize the jinja2 environment and load all templates for template_type set env = jinja2.Environment(loader=jinja2.PackageLoader("perun", "templates"), autoescape=True) @@ -67,10 +69,11 @@ def template_name_filter(template_name: str) -> bool: common_kit.touch_dir(target_dir) else: target_dir = os.path.join(perun_dev_dir, template_type) - log.info(f"Initializing new {template_type} module in {target_dir}") + log.minor_status(f"Initializing new {template_type} module", status=log.path_style(target_dir)) # Iterate through all templates and create the new files with rendered templates successfully_created_files = [] + log.increase_indent() for template_file in list_of_templates: # Specify the target filename of the template file template_filename, _ = os.path.splitext(template_file) @@ -79,28 +82,27 @@ def template_name_filter(template_name: str) -> bool: ) template_filename += ".py" successfully_created_files.append(os.path.join(target_dir, template_filename)) - log.info(f"> creating module '{template_filename}' from template", end="") - # Render and write the template into the resulting file with open(os.path.join(target_dir, template_filename), "w") as template_handle: template_handle.write(env.get_template(template_file).render(**kwargs)) + log.minor_success(f"module {log.path_style(template_filename)}", "created") - log.info(" [", end="") - log.done(ending="") - log.info("]") + log.decrease_indent() # Add the registration point to the set of file successfully_created_files.append(os.path.join(perun_dev_dir, template_type, "../__init__.py")) if template_type in ("postprocess", "collect", "view"): successfully_created_files.append(os.path.join(perun_dev_dir, "utils", "../__init__.py")) - log.info( - f"Please register your new module in '{template_type}/__init__.py' and/or 'utils/__init__.py'" - ) + log.minor_info("New module has to be registered at", end=":\n") + 
log.increase_indent()
+    log.minor_info(log.path_style(f"{os.path.join(template_type, '__init__.py')}"))
+    log.minor_info(log.path_style(f"{os.path.join('utils', '__init__.py')}"))
+    log.decrease_indent()
 
     # Unless specified in other way, open all of the files in the w.r.t the general.editor key
     if not no_edit:
         editor = config.lookup_key_recursively("general.editor")
-        log.info(f"Opening created files and registration point in {editor}")
+        log.minor_status("Opening the files in", status=f"{log.cmd_style(editor)}")
         try:
             commands.run_external_command([editor] + successfully_created_files[::-1])
         except Exception as inner_exception:
diff --git a/perun/utils/decorators.py b/perun/utils/decorators.py
index 6bd6c5dee..1d264a0aa 100644
--- a/perun/utils/decorators.py
+++ b/perun/utils/decorators.py
@@ -166,24 +166,3 @@ def inner_wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
         return func
 
     return inner_wrapper
-
-
-def phase_function(phase_name: str) -> Callable[..., Any]:
-    """Sets the phase name for the given function
-
-    The phase name is output when the elapsed time is printed.
- - :param str phase_name: name of the phase to which the given function corresponds - :return: decorated function with new phase name - """ - - def inner_wrapper(func: Callable[..., Any]) -> Callable[..., Any]: - """Inner wrapper of the decorated function - - :param function func: function we are decorating with its phase name - :return: decorated function with new phase name - """ - func.phase_name = phase_name # type: ignore - return func - - return inner_wrapper diff --git a/perun/utils/external/commands.py b/perun/utils/external/commands.py index 79c8ce5e6..2e35a5cee 100644 --- a/perun/utils/external/commands.py +++ b/perun/utils/external/commands.py @@ -105,10 +105,10 @@ def run_safely_list_of_commands(cmd_list: list[str]) -> None: :raise subprocess.CalledProcessError: when there is an error in any of the commands """ for cmd in cmd_list: - log.info(">", cmd) + log.write(">", cmd) out, err = run_safely_external_command(cmd) if out: - log.info(out.decode("utf-8"), end="") + log.write(out.decode("utf-8"), end="") if err: log.cprint(err.decode("utf-8"), "red") diff --git a/perun/utils/log.py b/perun/utils/log.py index 227d37e9c..e767b8337 100644 --- a/perun/utils/log.py +++ b/perun/utils/log.py @@ -46,6 +46,7 @@ VERBOSITY: int = 0 COLOR_OUTPUT: bool = True +CURRENT_INDENT: int = 0 # Enum of verbosity levels VERBOSE_DEBUG: int = 2 @@ -56,6 +57,18 @@ SUPPRESS_PAGING: bool = True +def increase_indent() -> None: + """Increases the indent for minor and major steps""" + global CURRENT_INDENT + CURRENT_INDENT += 1 + + +def decrease_indent() -> None: + """Increases the indent for minor and major steps""" + global CURRENT_INDENT + CURRENT_INDENT -= 1 + + def is_verbose_enough(verbosity_peak: int) -> bool: """Tests if the current verbosity of the log is enough @@ -150,21 +163,6 @@ def msg_to_file(msg: str, msg_verbosity: int, log_level: int = logging.INFO) -> _log_msg(logging.log, msg, msg_verbosity, log_level) -def info(msg: str, end: str = "\n") -> None: - """ - :param 
str msg: info message that will be printed only when there is at least lvl1 verbosity - :param str end: - """ - print(f"{msg}", end=end) - - -def quiet_info(msg: str) -> None: - """ - :param str msg: info message to the stream that will be always shown - """ - msg_to_stdout(msg, VERBOSE_RELEASE) - - def extract_stack_frame_info(frame: traceback.FrameSummary) -> tuple[str, str]: """Helper function for returning name and filename from frame. @@ -209,6 +207,14 @@ def print_current_stack( print(in_color("".join(traceback.format_list(reduced_trace)), colour), file=sys.stderr) +def write(msg: str, end: str = "\n") -> None: + """ + :param str msg: info message that will be printed only when there is at least lvl1 verbosity + :param str end: + """ + print(f"{msg}", end=end) + + def error( msg: str, recoverable: bool = False, @@ -219,7 +225,7 @@ def error( :param bool recoverable: whether we can recover from the error :param Exception raised_exception: exception that was raised before the error """ - print(in_color(f"fatal: {msg}", "red"), file=sys.stderr) + print(f"{tag('error', 'red')} {in_color(msg, 'red')}", file=sys.stderr) if is_verbose_enough(VERBOSE_DEBUG): print_current_stack(raised_exception=raised_exception) @@ -234,7 +240,7 @@ def warn(msg: str, end: str = "\n") -> None: :param str end: """ if not SUPPRESS_WARNINGS: - print(f"warning: {msg}", end=end) + print(f"{tag('warning', 'yellow')} {msg}", end=end) def print_current_phase(phase_msg: str, phase_unit: str, phase_colour: ColorChoiceType) -> None: @@ -244,7 +250,10 @@ def print_current_phase(phase_msg: str, phase_unit: str, phase_colour: ColorChoi :param str phase_unit: additional parameter that is passed to the phase_msg :param str phase_colour: phase colour defined in common_kit.py """ - print(in_color(phase_msg.format(in_color(phase_unit)), phase_colour, COLLECT_PHASE_ATTRS)) + minor_status( + in_color(phase_msg.strip().capitalize(), phase_colour, COLLECT_PHASE_ATTRS), + status=highlight(phase_unit), + ) 
@decorators.static_variables(current_job=1) @@ -254,7 +263,7 @@ def print_job_progress(overall_jobs: int) -> None: :param int overall_jobs: overall number of jobs to be done """ percentage_done = round((print_job_progress.current_job / overall_jobs) * 100) - print(f"[{str(percentage_done).rjust(3, ' ')}%] ", end="") + minor_status("Progress of the job", status=f"{str(percentage_done).rjust(3, ' ')}%") print_job_progress.current_job += 1 @@ -281,50 +290,80 @@ def cprintln(string: str, colour: ColorChoiceType, attrs: Optional[AttrChoiceTyp print(in_color(string, colour, attrs)) -def done(ending: str = "\n") -> None: - """Helper function that will print green done to the terminal +def tick(tick_symbol: str = ".") -> None: + """Prints single dot or other symbol - :param str ending: end of the string, by default new line + :param tick_symbol: symbol printed as tick """ - print("[", end="") - cprint("DONE", "green", attrs=["bold"]) - print("]", end=ending) + print(tick_symbol, end="") -def failed(ending: str = "\n") -> None: +def skipped(ending: str = "\n") -> None: """ :param str ending: end of the string, by default new line """ - print("[", end="") - cprint("FAILED", "red", attrs=["bold"]) - print("]", end=ending) + write(in_color("skipped", color="light_grey", attribute_style=["bold"]), end=ending) -def skipped(ending: str = "\n") -> None: - """ - :param str ending: end of the string, by default new line +def major_info(msg: str, colour: ColorChoiceType = "blue", no_title: bool = False) -> None: + """Prints major information, formatted in brackets [], in bold and optionally in color + + :param msg: printed message + :param no_title: if set to true, then the title will be printed as it is + :param colour: optional colour """ - print("[", end="") - cprint("skip", "grey", attrs=["bold"]) - print("]", end=ending) + stripped_msg = msg.strip() if no_title else msg.strip().title() + printed_msg = "[" + in_color(stripped_msg, colour, attribute_style=["bold"]) + "]" + 
newline() + write(" " * CURRENT_INDENT * 2 + printed_msg) + newline() + +def minor_status(msg: str, status: str = "", sep: str = "-") -> None: + """Prints minor status containing of two pieces of information: action and its status -def yes(ending: str = "\n") -> None: + It prints the status of some action, starting with `-` with indent and ending with newline. + + :param msg: printed message, which will be stripped from whitespace and capitalized + :param status: status of the info + :param sep: separator used to separate the info with its results """ - :param str ending: end of the string, by default new line + write(" " * CURRENT_INDENT * 2 + f" - {msg.strip().capitalize()} {sep} {status}") + + +def minor_info(msg: str, end: str = "\n") -> None: + """Prints minor information, formatted with indent and starting with - + + Note, that there are some sanitizations happening: + 1. If we want to end the info in new line, we add the punctuations; + + :param msg: printed message, which will be stripped from whitespace and capitalized + :param end: ending of the message """ - print("[", end="") - cprint("\u2714", "green", attrs=["bold"]) - print("]", end=ending) + msg = msg.strip().capitalize() + if end == "\n" and msg[-1] not in ".!;": + msg += "." 
+ write(" " * CURRENT_INDENT * 2 + f" - {msg}", end) + +def minor_fail(msg: str, fail_message: str = "failed") -> None: + """Helper function for shortening some messages""" + minor_status(msg, status=failed_highlight(fail_message)) -def no(ending: str = "\n") -> None: + +def minor_success(msg: str, success_message: str = "succeeded") -> None: + """Helper function for shortening some messages""" + minor_status(msg, status=success_highlight(success_message)) + + +def tag(tag_str: str, colour: ColorChoiceType) -> str: """ + :param tag_str: printed tag + :param colour: colour of the tag :param str ending: end of the string, by default new line + :return: formatted tag """ - print("[", end="") - cprint("\u2717", "red", attrs=["bold"]) - print("]", end=ending) + return "[" + in_color(tag_str.upper(), colour, attribute_style=["bold"]) + "]" def newline() -> None: @@ -334,6 +373,51 @@ def newline() -> None: print("") +def path_style(path_str: str) -> str: + """Unified formatting and colouring of the path. 
+ + :param path_str: string that corresponds to path + :return: stylized path string + """ + return in_color(path_str, "yellow", attribute_style=["bold"]) + + +def cmd_style(cmd_str: str) -> str: + """Unified formatting and colouring of the commands + + :param cmd_str: string that corresponds to command that should be run in terminal + :return: stylized command string + """ + return in_color(f"`{cmd_str}`", "light_grey") + + +def highlight(highlighted_str: str) -> str: + """Highlights the string + + :param highlighted_str: string that will be highlighted + :return: highlighted string + """ + return in_color(highlighted_str, "blue", attribute_style=["bold"]) + + +def success_highlight(highlighted_str: str) -> str: + """Highlights of the string that is considered successful + + :param highlighted_str: string that will be highlighted + :return: highlighted string + """ + return in_color(highlighted_str, "green", attribute_style=["bold"]) + + +def failed_highlight(highlighted_str: str) -> str: + """Highlights of the string that is considered failure + + :param highlighted_str: string that will be highlighted + :return: highlighted string + """ + return in_color(highlighted_str, "red", attribute_style=["bold"]) + + def in_color( output: str, color: ColorChoiceType = "white", attribute_style: Optional[AttrChoiceType] = None ) -> str: @@ -462,7 +546,7 @@ def print_short_change_string(counts: dict[str, int]) -> None: newline() -def _print_models_info(deg_info: DegradationInfo, model_strategy: str) -> None: +def _print_models_info(deg_info: DegradationInfo) -> None: """ The function prints information about both models from detection. 
@@ -494,25 +578,19 @@ def print_models_kinds( :param str target_colour: target colour to print target string :param str attrs: name of type attributes for the colouring """ - info(baseline_str, end="") + write(baseline_str, end="") cprint(f"{deg_info.from_baseline}", colour=baseline_colour, attrs=attrs) - info(target_str, end="") + write(target_str, end="") cprint(f"{deg_info.to_target}", colour=target_colour, attrs=attrs) from_colour, to_colour = get_degradation_change_colours(deg_info.result) - if model_strategy == "best-param": - print_models_kinds(" from: ", from_colour, " -> to: ", to_colour, ["bold"]) - elif model_strategy in ("best-nonparam", "best-model", "best-both"): - print_models_kinds(" base: ", "blue", " targ: ", "blue", ["bold"]) - elif model_strategy in ("all-nonparam", "all-param", "all-models"): - info(" model: ", end="") - cprint(f"{deg_info.from_baseline}", colour="blue", attrs=["bold"]) + print_models_kinds(" from: ", from_colour, " -> to: ", to_colour, ["bold"]) if deg_info.confidence_type != "no": - info(" (with confidence ", end="") + write(" (with confidence ", end="") cprint(f"{deg_info.confidence_type} = {deg_info.confidence_rate}", "white", ["bold"]) - info(")", end="") + write(")", end="") def _print_partial_intervals( @@ -541,7 +619,6 @@ def _print_partial_intervals( def print_list_of_degradations( degradation_list: list[tuple[DegradationInfo, str, str]], - model_strategy: str = "best-model", ) -> None: """Prints list of found degradations grouped by location @@ -551,10 +628,9 @@ def print_list_of_degradations( {result} from {from} -> to {to} :param list degradation_list: list of found degradations - :param str model_strategy: detection model strategy for obtains the relevant kind of models """ if not degradation_list: - info("no changes found") + write("no changes found") return def keygetter(item: tuple[DegradationInfo, str, str]) -> str: @@ -569,12 +645,12 @@ def keygetter(item: tuple[DegradationInfo, str, str]) -> str: 
degradation_list.sort(key=keygetter) for location, changes in itertools.groupby(degradation_list, keygetter): # Print the location - info("at", end="") + write("at", end="") cprint(f" {location}", "white", attrs=["bold"]) - info(":") + write(":") # Iterate and print everything for deg_info, cmd, __ in changes: - info("\u2514 ", end="") + write("\u2514 ", end="") if deg_info.rate_degradation_relative > 0.0 or deg_info.rate_degradation_relative < 0.0: cprint( f"{round(deg_info.rate_degradation, 2)}ms ({round(deg_info.rate_degradation_relative, 2)}%)", @@ -583,21 +659,21 @@ def keygetter(item: tuple[DegradationInfo, str, str]) -> str: ) else: cprint(f"{round(deg_info.rate_degradation, 2)}x", "white", ["bold"]) - info(": ", end="") + write(": ", end="") cprint(deg_info.type, CHANGE_TYPE_COLOURS.get(deg_info.type, "white")) - info(" ", end="") + write(" ", end="") cprint( f"{CHANGE_STRINGS[deg_info.result]}", CHANGE_COLOURS[deg_info.result], ["bold"], ) if deg_info.result != PerformanceChange.NoChange: - _print_models_info(deg_info, model_strategy) + _print_models_info(deg_info) # Print information about command that was executed - info(" (", end="") + write(" (", end="") cprint(f"$ {cmd}", CHANGE_CMD_COLOUR, ["bold"]) - info(")") + write(")") # Print information about the change on the partial intervals (only at Local-Statistics) if len(deg_info.partial_intervals) > 0: @@ -629,10 +705,7 @@ def aggregate_intervals( :return list: list of the aggregated partial intervals to print """ # Fixme: This is baaaad. 
But the partial intervals are somewhat broken (sometimes list, sometimes narray) - if isinstance(input_intervals, list): - intervals = np.array(input_intervals) - else: - intervals = input_intervals + intervals = np.array(input_intervals) if isinstance(input_intervals, list) else input_intervals def get_indices_of_intervals() -> Iterable[tuple[int, int]]: """ @@ -696,13 +769,8 @@ def inner_wrapper(*args: Any, **kwargs: Any) -> Any: before = time.time() results = func(*args, **kwargs) elapsed = time.time() - before - print( - "[!] {} [{}] in {} [!]".format( - (func.phase_name if hasattr(func, "phase_name") else func.__name__).title(), - in_color("DONE", "green", ["bold"]), - in_color(f"{elapsed:0.2f}s", "white", ["bold"]), - ) - ) + minor_status("Elapsed time", status=f"{elapsed:0.2f}s") + return results return inner_wrapper diff --git a/perun/vcs/git_repository.py b/perun/vcs/git_repository.py index 2fa93ed1a..0dc2bbc3a 100644 --- a/perun/vcs/git_repository.py +++ b/perun/vcs/git_repository.py @@ -65,13 +65,19 @@ def init(self, vcs_init_params: dict[str, Any]) -> bool: and not os.listdir(self.vcs_path) ): os.rmdir(self.vcs_path) - perun_log.error(f"while git init: {gce}", recoverable=True) + perun_log.error(f"while initializing git: {gce}", recoverable=True) return False if self.valid_repo: - perun_log.quiet_info(f"Reinitialized existing Git repository in {self.vcs_path}") + perun_log.minor_status( + f"Reinitialized {perun_log.highlight('existing')} Git repository", + status=f"{perun_log.path_style(self.vcs_path)}", + ) else: - perun_log.quiet_info(f"Initialized empty Git repository in {self.vcs_path}") + perun_log.minor_status( + f"Initialized {perun_log.highlight('empty')} Git repository", + status=f"{perun_log.path_style(self.vcs_path)}", + ) self._set_git_repo(self.vcs_path) return True diff --git a/perun/view/tableof/run.py b/perun/view/tableof/run.py index 5ac49ab2c..01f7dc864 100644 --- a/perun/view/tableof/run.py +++ b/perun/view/tableof/run.py @@ -47,7 
+47,7 @@ def output_table_to(table: str, target: str, target_file: str) -> None: with open(target_file, "w") as wtf: wtf.write(table) else: - log.info(table) + log.write(table) def create_table_from( diff --git a/perun/workload/__init__.py b/perun/workload/__init__.py index 8b778219c..ee1b45f62 100644 --- a/perun/workload/__init__.py +++ b/perun/workload/__init__.py @@ -39,29 +39,29 @@ def load_generator_specifications() -> dict[str, GeneratorSpec]: specifications_from_config = config.gather_key_recursively("generators.workload") spec_map = {} warnings = [] - log.info("Loading workload generator specifications ", end="") + log.minor_info("Loading workload generator specifications") for spec in specifications_from_config: if "id" not in spec.keys() or "type" not in spec.keys(): - warnings.append("incorrect workload specification: missing 'id' or 'type'") - log.info("F", end="") + warnings.append((spec.get("id", "?"), "specification is missing 'id' or 'type' key")) + log.tick("F") continue generator_module = f"perun.workload.{spec['type'].lower()}_generator" constructor_name = f"{spec['type'].title()}Generator" try: constructor = getattr(common_kit.get_module(generator_module), constructor_name) spec_map[spec["id"]] = GeneratorSpec(constructor, spec) - log.info(".", end="") + log.tick() except (ImportError, AttributeError): - warnings.append( - f"incorrect workload generator '{spec['id']}': '{spec['type']}' is not valid workload type" - ) - log.info("F", end="") + warnings.append((spec["id"], f"{spec['type']} is not valid workload type")) + log.tick("F") + log.newline() # Print the warnings and badge if warnings: - log.failed() - log.info("\n".join(warnings)) + log.minor_fail("Workload generators") + for workload_id, warning in warnings: + log.minor_fail(f"workload {log.highlight(workload_id)}", warning) else: - log.done() + log.minor_success("Workload generators", "loaded") return spec_map diff --git a/tests-perf/test_performance.py b/tests-perf/test_performance.py 
index 6c66ee0f4..6bb47586a 100644 --- a/tests-perf/test_performance.py +++ b/tests-perf/test_performance.py @@ -29,19 +29,18 @@ def run_benchmark(benchmark_dir, performance_tests): """ possible_tests = ("load", "query", "convert", "store") executed_tests = performance_tests or possible_tests - log.info("Running benchmark_dir: {}".format(log.in_color(benchmark_dir, "red"))) + log.write("Running benchmark_dir: {}".format(log.in_color(benchmark_dir, "red"))) results = [] r, d = os.path.split(benchmark_dir) store_dir = os.path.join(r, "store-" + d) common_kit.touch_dir(store_dir) for bench in os.listdir(benchmark_dir): - log.info(" > {}".format(log.in_color(bench, "yellow"))) + log.write(" > {}".format(log.in_color(bench, "yellow"))) results.append(performance_test(benchmark_dir, bench, store_dir, executed_tests)) - log.done() - log.info("") - log.info("") + log.write("") + log.write("") headers = ["file"] + [pt for pt in possible_tests if pt in executed_tests] - log.info(tabulate.tabulate(results, headers=headers, floatfmt=".2f")) + log.write(tabulate.tabulate(results, headers=headers, floatfmt=".2f")) with open(benchmark_dir + ".html", "w") as hh: hh.write(tabulate.tabulate(results, headers=headers, tablefmt="html", floatfmt=".2f")) @@ -61,14 +60,14 @@ def performance_test(bench_dir, file, store_dir, executed_tests): profile = store.load_profile_from_file(os.path.join(bench_dir, file), True) elapsed = time.time() - before results.append(elapsed) - log.info("Loading profile: {}".format(log.in_color("{:0.2f}s".format(elapsed), "white"))) + log.write("Loading profile: {}".format(log.in_color("{:0.2f}s".format(elapsed), "white"))) if "query" in executed_tests: before = time.time() _ = list(profile.all_resources()) elapsed = time.time() - before results.append(elapsed) - log.info( + log.write( "Iterating all resources: {}".format(log.in_color("{:0.2f}s".format(elapsed), "white")) ) @@ -77,7 +76,7 @@ def performance_test(bench_dir, file, store_dir, executed_tests): _ = 
convert.resources_to_pandas_dataframe(profile) elapsed = time.time() - before results.append(elapsed) - log.info( + log.write( "Converting to dataframe: {}".format(log.in_color("{:0.2f}s".format(elapsed), "white")) ) @@ -86,7 +85,7 @@ def performance_test(bench_dir, file, store_dir, executed_tests): streams.store_json(profile.serialize(), os.path.join(store_dir, file)) elapsed = time.time() - before results.append(elapsed) - log.info("Storing profile: {}".format(log.in_color("{:0.2f}s".format(elapsed), "white"))) + log.write("Storing profile: {}".format(log.in_color("{:0.2f}s".format(elapsed), "white"))) return results @@ -98,6 +97,6 @@ def performance_test(bench_dir, file, store_dir, executed_tests): else: run_benchmark(os.path.join("tests-perf", "monster-profiles"), performance_tests) benchmark_time = time.time() - start_time - log.info( + log.write( "Benchmark finished in {}".format(log.in_color("{:0.2f}s".format(benchmark_time), "white")) ) diff --git a/tests/conftest.py b/tests/conftest.py index f564fee5f..895eefabb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -523,6 +523,7 @@ def setup(): # Reset the verbosity to release log.VERBOSITY = 0 + log.CURRENT_INDENT = 0 # We disable the metrics by default, since they might slow down tests metrics.Metrics.enabled = False yield diff --git a/tests/test_add.py b/tests/test_add.py index 2ce99bbf4..6d0a8049e 100644 --- a/tests/test_add.py +++ b/tests/test_add.py @@ -16,6 +16,7 @@ # Perun Imports from perun.logic import store, index, commands from perun.utils import timestamps +from perun.utils.common import common_kit from perun.utils.exceptions import ( NotPerunRepositoryException, UnsupportedModuleException, @@ -125,10 +126,10 @@ def test_add_on_empty_repo(pcs_with_empty_git, valid_profile_pool, capsys): # Assert that the error message is OK _, err = capsys.readouterr() expected = ( - "fatal: while fetching head minor version: " + "while fetching head minor version: " f"Reference at 
'refs/heads/{git_default_branch_name}' does not exist" ) - assert err.strip() == termcolor.colored(expected, "red", force_color=True) + assert termcolor.colored(expected, "red", force_color=True) in err.strip() def test_add_on_no_vcs(pcs_without_vcs, valid_profile_pool): @@ -264,8 +265,9 @@ def test_add_wrong_profile(pcs_full, error_profile_pool, capsys): after_count = test_utils.count_contents_on_path(pcs_full.get_path()) assert before_count == after_count - _, err = capsys.readouterr() - assert "notexisting.perf does not exist" in err + out, err = capsys.readouterr() + assert "Profile does not exist" in out + assert "Registration failed for - 1 profile" in common_kit.escape_ansi(err) def test_add_existing(pcs_full, valid_profile_pool, capsys): diff --git a/tests/test_cli.py b/tests/test_cli.py index 99b4de0f2..74236e1c8 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -184,7 +184,7 @@ def test_regressogram_correct(pcs_single_prof): runner, regressogram_params + correct_test["params"], 0, - correct_test.get("output", "Successfully postprocessed"), + correct_test.get("output", "succeeded"), ) @@ -202,14 +202,14 @@ def moving_average_runner_test(runner, tests_set, tests_edge, exit_code, cprof_i runner, moving_average_params + moving_average_methods[n] + test["params"], exit_code, - test.get("output", "Successfully postprocessed"), + test.get("output", "succeeded"), ) else: run_non_param_test( runner, moving_average_params + moving_average_methods[method_idx] + test["params"], exit_code, - test.get("output", "Successfully postprocessed"), + test.get("output", "succeeded"), ) method_idx += 1 if idx + 1 == tests_edge[method_idx] else 0 @@ -448,7 +448,7 @@ def kernel_regression_runner_test(runner, tests_set, tests_edge, exit_code, cpro runner, kernel_regression_params + kernel_regression_modes[mode_idx] + test["params"], exit_code, - test.get("output", "Successfully postprocessed"), + test.get("output", "succeeded"), ) mode_idx += 1 if idx + 1 == 
tests_edge[mode_idx] else 0 @@ -1021,34 +1021,34 @@ def test_reg_analysis_correct(pcs_single_prof): [cprof_idx, "regression-analysis", "-m", "full", "-m", "iterative"], ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test the full computation method with all models set as a default value result = runner.invoke(cli.postprocessby, [cprof_idx, "regression-analysis", "-m", "full"]) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test the iterative method with all models result = runner.invoke(cli.postprocessby, [cprof_idx, "regression-analysis", "-m", "iterative"]) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test the interval method with all models result = runner.invoke(cli.postprocessby, [cprof_idx, "regression-analysis", "-m", "interval"]) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test the initial guess method with all models result = runner.invoke( cli.postprocessby, [cprof_idx, "regression-analysis", "-m", "initial_guess"] ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test the bisection method with all models result = runner.invoke(cli.postprocessby, [cprof_idx, "regression-analysis", "-m", "bisection"]) asserts.predicate_from_cli(result, 
result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test the bisection method with more complex model pool_path = os.path.join(os.path.split(__file__)[0], "profiles", "degradation_profiles") @@ -1057,14 +1057,14 @@ def test_reg_analysis_correct(pcs_single_prof): cli.postprocessby, [f"{complex_file}", "regression-analysis", "-m", "bisection"] ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test explicit models specification on full computation result = runner.invoke( cli.postprocessby, [cprof_idx, "regression-analysis", "-m", "full", "-r", "all"] ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test explicit models specification for multiple models result = runner.invoke( @@ -1083,7 +1083,7 @@ def test_reg_analysis_correct(pcs_single_prof): ], ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test explicit models specification for all models result = runner.invoke( @@ -1104,7 +1104,7 @@ def test_reg_analysis_correct(pcs_single_prof): ], ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test explicit models specification for all models values (also with 'all' value) result = runner.invoke( @@ -1127,7 +1127,7 @@ def test_reg_analysis_correct(pcs_single_prof): ], ) asserts.predicate_from_cli(result, 
result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test steps specification for full computation which has no effect result = runner.invoke( @@ -1135,7 +1135,7 @@ def test_reg_analysis_correct(pcs_single_prof): [cprof_idx, "regression-analysis", "-m", "full", "-r", "all", "-s", "100"], ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test reasonable steps value for iterative method result = runner.invoke( @@ -1143,8 +1143,8 @@ def test_reg_analysis_correct(pcs_single_prof): [cprof_idx, "regression-analysis", "-m", "iterative", "-r", "all", "-s", "4"], ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, result.output.count("Too few point") == 5) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, result.output.count("too few point") == 5) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test too many steps output result = runner.invoke( @@ -1161,8 +1161,8 @@ def test_reg_analysis_correct(pcs_single_prof): ], ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, result.output.count("Too few point") == 7) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, result.output.count("too few point") == 7) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test steps value clamping with iterative method result = runner.invoke( @@ -1170,7 +1170,7 @@ def test_reg_analysis_correct(pcs_single_prof): [cprof_idx, "regression-analysis", "-m", "iterative", "-r", "all", "-s", "-1"], ) asserts.predicate_from_cli(result, result.exit_code == 0) 
- asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) # Test different arguments positions result = runner.invoke( @@ -1178,7 +1178,7 @@ def test_reg_analysis_correct(pcs_single_prof): [cprof_idx, "regression-analysis", "-s", "2", "-r", "all", "-m", "full"], ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "Successfully postprocessed" in result.output) + asserts.predicate_from_cli(result, "succeeded" in result.output) def test_status_correct(pcs_single_prof): @@ -1438,7 +1438,9 @@ def test_add_massaged_head(pcs_full_no_prof, valid_profile_pool): runner = CliRunner() result = runner.invoke(cli.add, ["0@p", "--minor=HEAD"]) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, f"'{first_tagged}' successfully registered" in result.output) + asserts.predicate_from_cli( + result, f"{first_tagged} - registered" in common_kit.escape_ansi(result.output) + ) runner = CliRunner() result = runner.invoke(cli.add, ["0@p", r"--minor=HEAD^{d"]) @@ -1480,19 +1482,25 @@ def test_add_tag(monkeypatch, pcs_full_no_prof, valid_profile_pool): runner = CliRunner() result = runner.invoke(cli.add, ["0@p"]) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, f"'{first_sha}' successfully registered" in result.output) + asserts.predicate_from_cli( + result, f"{first_sha} - registered" in common_kit.escape_ansi(result.output) + ) runner = CliRunner() result = runner.invoke(cli.add, ["0@p"]) asserts.predicate_from_cli(result, result.exit_code == 1) - asserts.predicate_from_cli(result, f"originates from minor version '{parent}'" in result.output) + asserts.predicate_from_cli( + result, f"Origin version - {parent}" in common_kit.escape_ansi(result.output) + ) # Check that force work as intented monkeypatch.setattr("click.confirm", lambda _: True) runner = CliRunner() 
result = runner.invoke(cli.add, ["--force", "0@p"]) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, f"'{second_sha}' successfully registered" in result.output) + asserts.predicate_from_cli( + result, f"{second_sha} - registered" in common_kit.escape_ansi(result.output) + ) result = runner.invoke(cli.add, ["10@p"]) asserts.predicate_from_cli(result, result.exit_code == 2) @@ -1518,13 +1526,13 @@ def test_add_tag_range(pcs_with_root, valid_profile_pool): result = runner.invoke(cli.add, ["10@p-0@p"]) asserts.predicate_from_cli(result, result.exit_code == 0) asserts.predicate_from_cli( - result, "successfully registered 0 profiles in index" in result.output + result, "Registration succeeded for - 0 profiles" in common_kit.escape_ansi(result.output) ) result = runner.invoke(cli.add, ["0@p-10@p"]) asserts.predicate_from_cli(result, result.exit_code == 0) asserts.predicate_from_cli( - result, "successfully registered 2 profiles in index" in result.output + result, "Registration succeeded for - 2 profiles" in common_kit.escape_ansi(result.output) ) # Nothing should remain! 
@@ -1540,7 +1548,8 @@ def test_remove_tag(pcs_single_prof): runner = CliRunner() result = runner.invoke(cli.remove, ["0@i"]) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "1/1 deregistered" in result.output) + asserts.predicate_from_cli(result, "1/1" in result.output) + asserts.predicate_from_cli(result, "deregistered" in result.output) def test_remove_tag_range(pcs_full): diff --git a/tests/test_collect.py b/tests/test_collect.py index 8538028d3..af90f1892 100644 --- a/tests/test_collect.py +++ b/tests/test_collect.py @@ -136,7 +136,7 @@ def test_collect_complexity(monkeypatch, pcs_with_root, complexity_collect_job): + samplings, ) asserts.predicate_from_cli(result, result.exit_code == 0) - asserts.predicate_from_cli(result, "stored profile" in result.output) + asserts.predicate_from_cli(result, "Stored generated profile" in result.output) def test_collect_complexity_errors(monkeypatch, pcs_with_root, complexity_collect_job): @@ -370,7 +370,7 @@ def patched_call(cmd, *_, **__): # Try issue, when the libmalloc library is not there run.run_single_job(*memory_collect_job) out, _ = capsys.readouterr() - assert "Missing compiled dynamic library" in out + assert "Dynamic library libmalloc - not found" in common_kit.escape_ansi(out) # Try compiling again, but now with an error during the compilation return_code_for_make = 1 @@ -387,7 +387,7 @@ def patched_parse(*args): monkeypatch.setattr("perun.collect.memory.parsing.parse_log", patched_parse) run.run_single_job(*memory_collect_job) _, err = capsys.readouterr() - assert "Problems while parsing log file: " in err + assert "Could not parse the log file due to" in err def patched_run(_): return 42, "dummy" @@ -462,7 +462,7 @@ def test_collect_time(monkeypatch, pcs_with_root, capsys): # Assert outputs out, err = capsys.readouterr() assert err == "" - assert "Successfully collected data from echo" in out + assert "Collecting by time from `echo hello` - succeeded" in 
common_kit.escape_ansi(out) # Assert that just one profile was created # + 1 for index @@ -496,13 +496,14 @@ def test_integrity_tests(capsys): mock_report = RunnerReport(complexity, "postprocessor", {"profile": {}}) run.check_integrity_of_runner(complexity, "postprocessor", mock_report) out, err = capsys.readouterr() - assert "warning: complexity is missing postprocess() function" in out + + assert "complexity is missing postprocess() function" in out assert "" == err mock_report = RunnerReport(complexity, "collector", {}) run.check_integrity_of_runner(complexity, "collector", mock_report) out, err = capsys.readouterr() - assert "warning: collector complexity does not return any profile" + assert "collector complexity does not return any profile" assert "" == err @@ -557,4 +558,4 @@ def collect_firing_sigint(report, phase): status = run.run_single_job(["echo"], ["hello"], ["time"], [], [head]) assert status == CollectStatus.ERROR out, err = capsys.readouterr() - assert "fatal: while collecting by time: received signal" in err + assert "while collecting by time: received signal" in err diff --git a/tests/test_config.py b/tests/test_config.py index 095a7aa7d..5ec036550 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -156,7 +156,7 @@ def test_inits(capsys, tmpdir): other_local_config = config.local(str(other_config)) out, _ = capsys.readouterr() - assert "warn" in out + assert "warning" in out.lower() assert other_local_config.data == {} assert other_local_config.path == str(other_config) assert other_local_config.type == "local" diff --git a/tests/test_init.py b/tests/test_init.py index 3dc675fb8..0b7f37e42 100644 --- a/tests/test_init.py +++ b/tests/test_init.py @@ -102,7 +102,10 @@ def test_no_params_exists_pcs_in_same_dir(capsys): # Check if user was warned, that at the given path, the perun pcs was reinitialized out, _ = capsys.readouterr() - assert f"Reinitialized existing Perun repository in {pcs_path}" in out + assert ( + "Reinitialized 
existing perun repository" in common_kit.escape_ansi(out) + and f"{pcs_path}" in out + ) @pytest.mark.usefixtures("cleandir") @@ -128,9 +131,8 @@ def test_no_params_exists_pcs_in_parent(capsys): # Assert that user was warned, there is a super perun directory out, _ = capsys.readouterr() - assert ( - out.split("\n")[0].strip() == f"warning: There exists super perun directory at {pcs_path}" - ) + assert "There exists perun directory at" in out + assert f"{pcs_path}" in out @pytest.mark.usefixtures("cleandir") @@ -174,8 +176,9 @@ def test_git_exists_already(capsys): # Capture the out and check if the message contained "Reinitialized" out, _ = capsys.readouterr() - expected = out.split("\n")[1].strip() - assert expected == f"Reinitialized existing Git repository in {pcs_path}" + assert ( + "Reinitialized existing git repository" in common_kit.escape_ansi(out) and pcs_path in out + ) @pytest.mark.usefixtures("cleandir") diff --git a/tests/test_remove.py b/tests/test_remove.py index ac6e8e20c..a177cf205 100644 --- a/tests/test_remove.py +++ b/tests/test_remove.py @@ -43,7 +43,7 @@ def test_rm_on_empty_repo(pcs_with_empty_git, capsys): # Test that nothing is printed on out and something is printed on err out, err = capsys.readouterr() assert out == "" - assert err != "" and "fatal" in err + assert err != "" and "error" in err.lower() def test_rm_no_profiles(pcs_full_no_prof, capsys): @@ -77,12 +77,12 @@ def test_rm_nonexistent(pcs_single_prof, capsys): Expecting error message and nothing removed at all """ before_count = test_utils.count_contents_on_path(pcs_single_prof.get_path()) - with pytest.raises(EntryNotFoundException) as exc: - commands.remove_from_index(["nonexistent.perf"], None) - assert "'nonexistent.perf' not found in the index" in str(exc.value) - + commands.remove_from_index(["nonexistent.perf"], None) out, _ = capsys.readouterr() - assert out == "" + + assert "not found" in out + assert "deregistered" not in out and "deleted" not in out + assert "Nothing 
to remove" in out # Assert that nothing was removed after_count = test_utils.count_contents_on_path(pcs_single_prof.get_path()) diff --git a/tests/test_status.py b/tests/test_status.py index f65e4abc7..352d8eaa8 100644 --- a/tests/test_status.py +++ b/tests/test_status.py @@ -218,7 +218,7 @@ def test_status_on_empty_repo(pcs_with_empty_git, capsys): # Test that nothing is printed on out and something is printed on err out, err = capsys.readouterr() assert out == "" - assert err != "" and "fatal" in err + assert err != "" and "error" in err.lower() def test_status_no_pending(pcs_full, capsys, stored_profile_pool): diff --git a/tests/test_store.py b/tests/test_store.py index 14a0b0222..69d61f4f6 100644 --- a/tests/test_store.py +++ b/tests/test_store.py @@ -29,7 +29,7 @@ def mocked_read_int(_): with pytest.raises(SystemExit): print(list(index.walk_index(index_handle))) _, err = capsys.readouterr() - assert "fatal: malformed index file: too many or too few objects registered in index" in err + assert "malformed index file: too many or too few objects registered in index" in err monkeypatch.setattr("perun.logic.store.read_int_from_handle", old_read_int) monkeypatch.setattr("perun.logic.index.INDEX_VERSION", index.INDEX_VERSION - 1) diff --git a/tests/test_utils.py b/tests/test_utils.py index 589d26464..e3a3168ff 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -140,7 +140,7 @@ def test_paging_and_config(monkeypatch, capsys): assert commands.turn_off_paging_wrt_config("status") assert commands.turn_off_paging_wrt_config("log") out, _ = capsys.readouterr() - assert "warn" in out and "missing ``general.paging``" in out + assert "warning" in out.lower() and "missing ``general.paging``" in out def test_binaries_lookup(): @@ -297,7 +297,7 @@ def test_predicates(capsys): def test_log_common(capsys): log.skipped() out, _ = capsys.readouterr() - assert "[\x1b[1m\x1b[30mskip\x1b[0m]\n" in out + assert "\x1b[1m\x1b[37mskipped\x1b[0m\n" in out def test_logger(capsys):