Enhance the output of the Perun #174

Merged · 25 commits · Feb 12, 2024

Changes from all commits
8 changes: 5 additions & 3 deletions perun/check/factory.py
@@ -144,6 +144,7 @@ def degradation_in_minor(
:param bool quiet: if set to true then nothing will be printed
:returns: list of found changes
"""
log.major_info(f"Checking Version {minor_version}")
selection: AbstractBaseSelection = pcs.selection()
minor_version_info = pcs.vcs().get_minor_version_info(minor_version)

@@ -178,13 +179,14 @@ def degradation_in_minor(


@log.print_elapsed_time
@decorators.phase_function("check whole repository")
def degradation_in_history(head: str) -> list[tuple[DegradationInfo, str, str]]:
"""Walks through the minor version starting from the given head, checking for degradation.

:param str head: starting point of the checked history for degradation.
:returns: tuple (degradation result, degradation location, degradation rate)
"""
log.major_info("Checking Whole History")
log.minor_info("This might take a while")
detected_changes = []
version_selection: AbstractBaseSelection = pcs.selection()
with log.History(head) as history:
@@ -269,7 +271,6 @@ def run_degradation_check(


@log.print_elapsed_time
@decorators.phase_function("check two profiles")
def degradation_between_files(
baseline_file: Profile,
target_file: Profile,
@@ -286,6 +287,7 @@ def degradation_between_files(
:param bool force: force profiles check despite different configurations
:returns None: no return value
"""
log.major_info("Checking two compatible profiles")
# First check if the configurations are compatible
baseline_config = profiles.to_config_tuple(baseline_file)
target_config = profiles.to_config_tuple(target_file)
@@ -309,7 +311,7 @@
pcs.get_object_directory(), target_minor_version, detected_changes
)
log.newline()
log.print_list_of_degradations(detected_changes, models_strategy)
log.print_list_of_degradations(detected_changes)
log.print_short_summary_of_degradations(detected_changes)


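The hunks above replace the removed @decorators.phase_function decorators with explicit banner calls at the start of each phase. A minimal sketch (not part of the PR) of the resulting convention, with helper names taken from the call sites in this diff and the import path assumed:

# Sketch only: phase banners are now emitted explicitly at the start of a
# phase instead of via the removed @decorators.phase_function decorator.
from perun.utils import log  # import path assumed


@log.print_elapsed_time
def some_check_phase(minor_version: str) -> list:
    log.major_info(f"Checking Version {minor_version}")  # section banner
    log.minor_info("This might take a while")  # indented progress note
    detected_changes: list = []
    # ... the actual degradation checks would run here ...
    return detected_changes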
9 changes: 5 additions & 4 deletions perun/cli.py
@@ -282,9 +282,10 @@ def init(dst: str, configure: bool, config_template: str, **kwargs: Any) -> None
# Run the interactive configuration of the local perun repository (populating .yml)
configure_local_perun(dst)
else:
msg = "\nIn order to automatically run jobs configure the matrix at:\n"
msg += "\n" + (" " * 4) + ".perun/local.yml\n"
perun_log.quiet_info(msg)
perun_log.minor_status(
"Local instance of Perun can now be (manually) configured",
status=f"{perun_log.path_style('.perun/local.yml')}",
)
except PermissionError:
perun_log.error("writing to shared config 'shared.yml' requires root permissions")
except (ExternalEditorErrorException, MissingConfigSectionException):
@@ -478,7 +479,7 @@ def remove(
try:
commands.remove_from_index(from_index_generator, minor)
commands.remove_from_pending(from_jobs_generator)
except (NotPerunRepositoryException, EntryNotFoundException) as exception:
except (NotPerunRepositoryException) as exception:
perun_log.error(f"could not remove profiles: {str(exception)}")


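The init hunk swaps a hand-built multi-line message for a single status line. A small usage sketch of the helper pairing a left-hand message with a styled right-hand status; the import alias is assumed to match the perun_log prefix used in perun/cli.py, and the helper semantics are inferred from this diff:

import perun.utils.log as perun_log  # alias assumed to match perun/cli.py

# One consolidated status line replaces the hand-assembled multi-line message.
perun_log.minor_status(
    "Local instance of Perun can now be (manually) configured",
    status=perun_log.path_style(".perun/local.yml"),
)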
24 changes: 9 additions & 15 deletions perun/cli_groups/check_cli.py
@@ -114,21 +114,20 @@ def check_group(**_: Any) -> None:
str(perun_config.lookup_key_recursively("degradation.log_collect", "false"))
)
if should_precollect:
log.major_info("Precollecting Profiles")
collect_before_check = log.in_color("degradation.collect_before_check", "white", ["bold"])
true = log.in_color("true", "green", ["bold"])
log.info(f"{collect_before_check} is set to {true}. ", end="")
log.info("Missing profiles will be freshly collected with respect to the ", end="")
log.info(
"nearest job matrix (run `perun config edit` to modify the underlying job matrix)."
)
log.minor_success(f"{log.highlight(collect_before_check)}", "true")
log.minor_info("Missing profiles will be now collected")
log.increase_indent()
log.minor_info(f"Run {log.cmd_style('perun config edit')} to modify the job matrix")
log.decrease_indent()
if precollect_to_log:
log_directory = log.in_color(pcs.get_log_directory(), "white", ["bold"])
log.info(
f"The progress of the pre-collect phase will be stored in logs at {log_directory}."
log.minor_status(
"The progress will be stored in log", status=log.path_style(log_directory)
)
else:
black_hole = log.in_color("black hole", "white", ["bold"])
log.info(f"The progress of the pre-collect phase will be redirected to {black_hole}.")
log.minor_info(f"The progress will be redirected to {log.highlight('black hole')}")


@check_group.command("head")
@@ -152,7 +151,6 @@ def check_head(head_minor: str = "HEAD") -> None:

By default, the ``hash`` corresponds to the `head` of the current project.
"""
log.newline()
check.degradation_in_minor(head_minor)


@@ -174,9 +172,6 @@ def check_all(minor_head: str = "HEAD") -> None:
and runs the performance check according to the set of strategies set in the configuration
(see :ref:`degradation-config` or :doc:`config`).
"""
log.info(
"[!] Running the degradation checks on the whole VCS history. This might take a while!\n"
)
check.degradation_in_history(minor_head)


@@ -239,7 +234,6 @@ def check_profiles(

"""
assert ctx.parent is not None and f"impossible happened: {ctx} has no parent"
log.newline()
check.degradation_between_files(
baseline_profile,
target_profile,
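The precollect hunk also introduces indent helpers for grouping related progress lines. A short sketch of the pairing, assuming increase_indent/decrease_indent shift all subsequent minor_* output one level (import path assumed):

from perun.utils import log  # import path assumed

log.major_info("Precollecting Profiles")
log.minor_success(log.highlight("degradation.collect_before_check"), "true")
log.minor_info("Missing profiles will be now collected")
log.increase_indent()
# Lines logged here render one level deeper, visually tied to the line above.
log.minor_info(f"Run {log.cmd_style('perun config edit')} to modify the job matrix")
log.decrease_indent()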
20 changes: 14 additions & 6 deletions perun/collect/bounds/run.py
@@ -34,22 +34,25 @@ def before(sources: list[str], **kwargs: Any) -> tuple[CollectStatus, str, dict[

$ clang-3.5 -g -emit-llvm -c ${sources}
"""
log.major_info("Compiling to LLVM", no_title=True)
pwd = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bin")
include_path = os.path.join(pwd, "include")
clang_bin = (
_CLANG_COMPILER if shutil.which(_CLANG_COMPILER) else os.path.join(pwd, _CLANG_COMPILER)
)
log.minor_status(f"{log.highlight('clang')} found", status=log.path_style(clang_bin))
cmd = " ".join([clang_bin] + ["-I", include_path] + _CLANG_COMPILATION_PARAMS + list(sources))
log.info(f"Compiling source codes: {','.join(sources)}")

log.minor_status("Compiling source codes", status=f"{','.join(sources)}")
my_env = os.environ.copy()
my_env["LD_LIBRARY_PATH"] = pwd
try:
commands.run_safely_external_command(cmd, check_results=True, env=my_env, quiet=False)
except SubprocessError as sub_err:
log.failed()
log.minor_fail("Compiling to LLVM")
return CollectStatus.ERROR, str(sub_err), dict(kwargs)

log.done()
log.minor_success("Compiling to LLVM")
return CollectStatus.OK, "status_message", dict(kwargs)


@@ -61,13 +64,17 @@ def collect(sources: list[str], **kwargs: Any) -> tuple[CollectStatus, str, dict

Finally, parses the output of Loopus into a profile
"""
log.major_info("Running Loopus")
pwd = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bin")
loopus_bin = os.path.join(pwd, "loopus")
source_filenames = [os.path.splitext(os.path.split(src)[1])[0] + _LLVM_EXT for src in sources]
my_env = os.environ.copy()
my_env["LD_LIBRARY_PATH"] = pwd

log.info(f"Running Loopus on compiled source codes: {' '.join(source_filenames)}")
log.minor_status(f"{log.highlight('Loopus')} found at", status=log.path_style(loopus_bin))
log.minor_status(
"Running Loopus on compiled source codes", status=f"{' '.join(source_filenames)}"
)

before_analysis = systime.time()
try:
@@ -79,17 +86,18 @@ def collect(sources: list[str], **kwargs: Any) -> tuple[CollectStatus, str, dict
returned_out, _ = commands.run_safely_external_command(cmd, check_results=True, env=my_env)
out = returned_out.decode("utf-8")
except SubprocessError as sub_err:
log.failed()
log.minor_fail("Collection of bounds")
return CollectStatus.ERROR, str(sub_err), dict(kwargs)
overall_time = systime.time() - before_analysis
log.minor_success("Collection of bounds")

# Parse the out, but first fix the one file analysis, which has different format
if len(sources) == 1:
out = f"file {source_filenames[0]}\n" + out
source_map = {bc: src for (bc, src) in zip(source_filenames, sources)}
resources = parser.parse_output(out, source_map)
log.minor_success("Parsing collected output")

log.done()
return (
CollectStatus.OK,
"status message",
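Each external step of the bounds collector is now bracketed by a matching minor_success/minor_fail under its phase banner. A condensed sketch of that bracketing (perun import paths assumed; SubprocessError is the standard subprocess exception):

from subprocess import SubprocessError

from perun.utils import log  # import paths assumed
from perun.utils.external import commands


def run_step(cmd: str, env: dict[str, str]) -> bool:
    """Sketch: run one external command, report one pass/fail status line."""
    try:
        commands.run_safely_external_command(cmd, check_results=True, env=env)
    except SubprocessError:
        log.minor_fail("Compiling to LLVM")
        return False
    log.minor_success("Compiling to LLVM")
    return True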
61 changes: 33 additions & 28 deletions perun/collect/complexity/run.py
@@ -74,6 +74,7 @@ def before(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, d
string as a status message, mainly for error states
dict of modified kwargs with 'cmd' value representing the executable
"""
log.major_info("Preparing the instrumented executable")
try:
# Validate the inputs and dependencies first
_validate_input(**kwargs)
@@ -87,28 +88,24 @@ def before(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, d
)

# Create the configuration cmake and build the configuration executable
log.cprint("Building the configuration executable...", "white")
cmake_path = makefiles.create_config_cmake(target_dir, files)
exec_path = makefiles.build_executable(cmake_path, makefiles.CMAKE_CONFIG_TARGET)
log.done()
log.minor_success("Building the configuration executable")

# Extract some configuration data using the configuration executable
log.cprint("Extracting the configuration...", "white")
function_sym = symbols.extract_symbols(exec_path)
include_list, exclude_list, runtime_filter = symbols.filter_symbols(function_sym, rules)
log.done()
log.minor_success("Extracting the configuration")

# Create the collector cmake and build the collector executable
log.cprint("Building the collector executable...", "white")
cmake_path = makefiles.create_collector_cmake(target_dir, files, exclude_list)
exec_path = makefiles.build_executable(cmake_path, makefiles.CMAKE_COLLECT_TARGET)
log.done()
log.minor_success("Building the collector executable")

# Create the internal configuration file
log.cprint("Creating runtime config...", "white")
configurator.create_runtime_config(exec_path, runtime_filter, include_list, kwargs)
executable.cmd = exec_path
log.done()
log.minor_success("Creating the runtime config")
return CollectStatus.OK, _COLLECTOR_STATUS_MSG[0], dict(kwargs)

# The "expected" exception types
Expand All @@ -119,7 +116,7 @@ def before(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, d
UnicodeError,
exceptions.UnexpectedPrototypeSyntaxError,
) as exception:
log.failed()
log.minor_fail("Preparing the instrumented executable")
return CollectStatus.ERROR, str(exception), dict(kwargs)


@@ -133,15 +130,15 @@ def collect(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str,
string as a status message, mainly for error states
dict of unmodified kwargs
"""
log.cprint("Running the collector...", "white")
log.major_info("Collecting Data")
collect_dir = os.path.dirname(executable.cmd)
# Run the command and evaluate the return code
try:
commands.run_safely_external_command(str(executable), cwd=collect_dir)
log.done()
log.minor_success("Collection of data")
return CollectStatus.OK, _COLLECTOR_STATUS_MSG[0], dict(kwargs)
except (CalledProcessError, IOError) as err:
log.failed()
log.minor_fail("Collection of data")
return (
CollectStatus.ERROR,
_COLLECTOR_STATUS_MSG[21] + f": {str(err)}",
@@ -159,11 +156,12 @@ def after(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, di
string as a status message, mainly for error states
dict of modified kwargs with 'profile' value representing the resulting profile
"""
log.major_info("Creating profile")
# Get the trace log path
log.cprint("Starting the post-processing phase...", "white")
internal_filename = kwargs.get("internal_data_filename", configurator.DEFAULT_DATA_FILENAME)
data_path = os.path.join(os.path.dirname(executable.cmd), internal_filename)
address_map = symbols.extract_symbol_address_map(executable.cmd)
log.minor_success("Symbol address map", "extracted")

resources: list[dict[str, Any]] = []
call_stack: list[ProfileRecord] = []
@@ -183,14 +181,15 @@ def after(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, di
err_msg += (
call_stack[-1].func + ", " + call_stack[-1].action if call_stack else "empty"
)
log.failed()
log.minor_fail("Parsing log")
return CollectStatus.ERROR, err_msg, dict(kwargs)

# Get the first and last record timestamps to determine the profiling time
profile_end = int(record.timestamp)
if is_first_line:
is_first_line = False
profile_start = int(record.timestamp)
log.minor_success("Parsing log")

# Update the profile dictionary
kwargs["profile"] = {
Expand All @@ -199,7 +198,6 @@ def after(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str, di
"resources": resources,
}
}
log.done()
return CollectStatus.OK, _COLLECTOR_STATUS_MSG[0], dict(kwargs)


@@ -241,27 +239,34 @@ def _process_file_record(

def _check_dependencies() -> None:
"""Validates that dependencies (cmake and make) are met"""
log.cprint("Checking dependencies...", "white")
log.newline()
log.cprint("make:", "white")
log.info("\t", end="")
log.minor_info("Checking dependencies")
log.increase_indent()
all_found = True
if not shutil.which("make"):
log.no()
all_found = False
log.minor_fail("make", "not found")
log.error(
"Could not find 'make'. Please, install the makefile package.",
recoverable=True,
)
else:
log.yes()
log.cprint("cmake:", "white")
log.info("\t", end="")
log.minor_success("make", "found")
if not shutil.which("cmake"):
log.no()
log.error("Could not find 'cmake'. Please, install build-essentials and cmake packages.")
all_found = False
log.minor_fail("cmake", "not found")
log.error(
"Could not find 'cmake'. Please, install `build-essentials` and `cmake` packages.",
recoverable=True,
)
else:
log.minor_success("cmake", "found")
log.decrease_indent()

if all_found:
log.minor_success("dependencies", "all found")
else:
log.yes()
log.newline()
log.done()
log.minor_fail("dependencies", "not found")
log.error("Some dependencies were not satisfied: complexity cannot be run")


def _validate_input(**kwargs: Any) -> None:
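Across the complexity collector, every phase now follows the same shape: open with major_info, report step-level minor_success/minor_fail, and return the usual (status, message, kwargs) triple. A schematic sketch of that contract (import paths for log and CollectStatus assumed):

from typing import Any

from perun.utils import log  # import paths assumed
from perun.utils.structs import CollectStatus


def phase(**kwargs: Any) -> tuple[CollectStatus, str, dict[str, Any]]:
    log.major_info("Collecting Data")  # phase banner, as in collect() above
    try:
        pass  # the phase's actual work would go here
    except (OSError, ValueError) as err:
        log.minor_fail("Collection of data")
        return CollectStatus.ERROR, str(err), dict(kwargs)
    log.minor_success("Collection of data")
    return CollectStatus.OK, "status message", dict(kwargs)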
6 changes: 4 additions & 2 deletions perun/collect/memory/parsing.py
@@ -70,11 +70,13 @@ def parse_allocation_location(trace: list[dict[str, Any]]) -> dict[str, Any]:
:param list trace: list representing stack call trace
:returns dict: first user's call to allocation
"""
result = {}
for call in trace or []:
source = call["source"]
if source != "unreachable":
return call
return {}
result = call
break
return result


def parse_resources(allocation: list[str]) -> dict[str, Any]:
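The rewritten parse_allocation_location keeps the documented behavior — return the first frame whose source is not "unreachable" — while funneling both the found-frame and the empty-trace cases through a single return. A tiny usage sketch (the trace dicts are illustrative; only the "source" key is consulted):

# Hypothetical trace frames; keys other than "source" are made up here.
trace = [
    {"source": "unreachable", "function": "malloc"},
    {"source": "main.c", "function": "main"},
]
assert parse_allocation_location(trace) == {"source": "main.c", "function": "main"}
assert parse_allocation_location([]) == {}  # empty (or None) trace yields {}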