diff --git a/perun/cli_groups/import_cli.py b/perun/cli_groups/import_cli.py index bb5aa6bf..de282661 100755 --- a/perun/cli_groups/import_cli.py +++ b/perun/cli_groups/import_cli.py @@ -130,3 +130,30 @@ def from_stacks(ctx: click.Context, imported: list[str], **kwargs: Any) -> None: """ kwargs.update(ctx.obj) imports.import_perf_from_stack(imported, **kwargs) + + +@import_group.group("elk") +@click.pass_context +def elk_group(ctx: click.Context, **kwargs: Any) -> None: + """Imports Perun profiles from elk results + + By ELK we mean Elasticsearch Stack (Elasticsearch, Logstash, Kibana) + + We assume the data are already flattened and are in form of: + + [{key: value, ...}, ...] + + The command supports profiles collected in: + + 1. JSON format: files, that are extracted from ELK or are stored using format compatible with ELK. + """ + ctx.obj.update(kwargs) + + +@elk_group.command("json") +@click.argument("imported", nargs=-1, required=True) +@click.pass_context +def from_json(ctx: click.Context, imported: list[str], **kwargs: Any) -> None: + """Imports Perun profiles from json compatible with elk infrastructure""" + kwargs.update(ctx.obj) + imports.import_elk_from_json(imported, **kwargs) diff --git a/perun/profile/convert.py b/perun/profile/convert.py index 1ccc1e6c..0b8a6ef4 100644 --- a/perun/profile/convert.py +++ b/perun/profile/convert.py @@ -142,9 +142,9 @@ def to_flame_graph_format( for alloc in snapshot: if "subtype" not in alloc.keys() or alloc["subtype"] != "free": # Workaround for total time used in some collectors, so it is not outputted - if alloc["uid"] != "%TOTAL_TIME%": + if alloc["uid"] != "%TOTAL_TIME%" and profile_key in alloc: stack_str = to_uid(alloc["uid"]) + ";" - for frame in alloc["trace"][::-1]: + for frame in alloc.get("trace", [])[::-1]: line = to_uid(frame, minimize) stack_str += line + ";" if stack_str and stack_str.endswith(";"): diff --git a/perun/profile/factory.py b/perun/profile/factory.py index 5e06db89..d16f125a 100644 --- 
a/perun/profile/factory.py +++ b/perun/profile/factory.py @@ -54,6 +54,10 @@ class Profile(MutableMapping[str, Any]): "address", "timestamp", "exclusive", + "metric.iteration", + "metric.value", + "metric.score-value", + "metric.percentile", } persistent = {"trace", "type", "subtype", "uid", "location"} diff --git a/perun/profile/imports.py b/perun/profile/imports.py index 6e8784cd..6e07f56e 100755 --- a/perun/profile/imports.py +++ b/perun/profile/imports.py @@ -3,14 +3,15 @@ from __future__ import annotations # Standard Imports -from typing import Any, Optional, Iterator, Callable +from collections import defaultdict +from dataclasses import dataclass, field, asdict from pathlib import Path -import json +from typing import Any, Optional, Iterator, Callable import csv +import json import os -import subprocess import statistics -from dataclasses import dataclass, field, asdict +import subprocess # Third-Party Imports import gzip @@ -20,7 +21,7 @@ from perun.profile import helpers as p_helpers from perun.logic import commands, index, pcs from perun.utils import log, streams -from perun.utils.common import script_kit +from perun.utils.common import script_kit, common_kit from perun.utils.external import commands as external_commands, environment from perun.utils.structs import MinorVersion from perun.profile.factory import Profile @@ -40,6 +41,15 @@ class ImportProfileSpec: class ImportedProfiles: + """ + Note: I would reconsider this class or refactor it, removing the logical elements, it obfuscates the logic a little + and makes the functions less readable (there are not streams/pipes as is most of the logic/perun); I for one am + rather "fan" of generic functions that takes structures and returns structure than classes with methods/logic. + TODO: the import-dir could be removed by extracting this functionality to command-line callback and massage + the paths during the CLI parsing; hence assuming that the paths are correct when importing. 
I think the parameter + only complicates the code. + """ + __slots__ = "import_dir", "stats", "profiles" def __init__(self, targets: list[str], import_dir: str | None, stats_info: str | None) -> None: @@ -122,6 +132,11 @@ def _add_imported_profile(self, target: list[str]) -> None: def load_file(filepath: Path) -> str: + """Tests if the file is packed by gzip and unpacks it, otherwise reads it as a text file + + :param filepath: path with source file + :return: the content of the file + """ if filepath.suffix.lower() == ".gz": with open(filepath, "rb") as f: header = f.read(2) @@ -146,7 +161,7 @@ def get_machine_info(machine_info: Optional[str] = None) -> dict[str, Any]: return environment.get_machine_specification() -def import_profile( +def import_perf_profile( profiles: ImportedProfiles, resources: list[dict[str, Any]], minor_version: MinorVersion, @@ -155,6 +170,16 @@ def import_profile( save_to_index: bool = False, **kwargs: Any, ) -> None: + """Constructs the profile for perf-collected data and saves them to jobs or index + + :param profiles: list of to-be-imported profiles + :param resources: list of parsed resources + :param minor_version: minor version corresponding to the imported profiles + :param machine_info: additional dictionary with machine specification + :param with_sudo: indication whether the data were collected with sudo + :param save_to_index: indication whether we should save the imported profiles to index + :param kwargs: rest of the paramters + """ prof = Profile( { "global": { @@ -191,6 +216,16 @@ def import_profile( ) prof.update({"postprocessors": []}) + save_imported_profile(prof, save_to_index, minor_version) + + +def save_imported_profile(prof: Profile, save_to_index: bool, minor_version: MinorVersion) -> None: + """Saves the imported profile either to index or to pending jobs + + :param prof: imported profile + :param minor_version: minor version corresponding to the imported profiles + :param save_to_index: indication whether we 
should save the imported profiles to index + """ full_profile_name = p_helpers.generate_profile_name(prof) profile_directory = pcs.get_job_directory() full_profile_path = os.path.join(profile_directory, full_profile_name) @@ -216,7 +251,18 @@ def import_perf_from_record( with_sudo: bool = False, **kwargs: Any, ) -> None: - """Imports profile collected by `perf record`""" + """Imports profile collected by `perf record` + + It does some black magic in ImportedProfiles probably, then for each filename it runs the + perf script + parser script to generate the profile. + + :param imported: list of files with imported data + :param import_dir: different directory for importing the profiles + :param stats_info: additional statistics collected for the profile (i.e. non-resource types) + :param minor_version: minor version corresponding to the imported profiles + :param with_sudo: indication whether the data were collected with sudo + :param kwargs: rest of the paramters + """ parse_script = script_kit.get_script("stackcollapse-perf.pl") minor_version_info = pcs.vcs().get_minor_version_info(minor_version) @@ -239,7 +285,7 @@ def import_perf_from_record( log.error(f"Cannot load data due to: {err}") resources.extend(parser.parse_events(out.decode("utf-8").split("\n"))) log.minor_success(log.path_style(str(imported_file.path)), "imported") - import_profile(profiles, resources, minor_version_info, with_sudo=with_sudo, **kwargs) + import_perf_profile(profiles, resources, minor_version_info, with_sudo=with_sudo, **kwargs) @vcs_kit.lookup_minor_version @@ -250,7 +296,17 @@ def import_perf_from_script( minor_version: str, **kwargs: Any, ) -> None: - """Imports profile collected by `perf record; perf script`""" + """Imports profile collected by `perf record | perf script` + + It does some black magic in ImportedProfiles probably, then for each filename it runs the + parser script to generate the profile. 
+ + :param imported: list of files with imported data + :param import_dir: different directory for importing the profiles + :param stats_info: additional statistics collected for the profile (i.e. non-resource types) + :param minor_version: minor version corresponding to the imported profiles + :param kwargs: rest of the paramters + """ parse_script = script_kit.get_script("stackcollapse-perf.pl") minor_version_info = pcs.vcs().get_minor_version_info(minor_version) @@ -263,7 +319,7 @@ def import_perf_from_script( log.minor_success(f"Raw data from {log.path_style(str(imported_file.path))}", "collected") resources.extend(parser.parse_events(out.decode("utf-8").split("\n"))) log.minor_success(log.path_style(str(imported_file.path)), "imported") - import_profile(profiles, resources, minor_version_info, **kwargs) + import_perf_profile(profiles, resources, minor_version_info, **kwargs) @vcs_kit.lookup_minor_version @@ -274,7 +330,16 @@ def import_perf_from_stack( minor_version: str, **kwargs: Any, ) -> None: - """Imports profile collected by `perf record; perf script | stackcollapse-perf.pl`""" + """Imports profile collected by `perf record | perf script` + + It does some black magic in ImportedProfiles probably, then for each filename parses the files. + + :param imported: list of files with imported data + :param import_dir: different directory for importing the profiles + :param stats_info: additional statistics collected for the profile (i.e. 
non-resource types) + :param minor_version: minor version corresponding to the imported profiles + :param kwargs: rest of the paramters + """ minor_version_info = pcs.vcs().get_minor_version_info(minor_version) profiles = ImportedProfiles(imported, import_dir, stats_info) @@ -284,4 +349,156 @@ def import_perf_from_stack( out = load_file(imported_profile.path) resources.extend(parser.parse_events(out.split("\n"))) log.minor_success(log.path_style(str(imported_profile.path)), "imported") - import_profile(profiles, resources, minor_version_info, **kwargs) + import_perf_profile(profiles, resources, minor_version_info, **kwargs) + + +def extract_machine_info_from_metadata(metadata: dict[str, Any]) -> dict[str, Any]: + """Extracts the parts of the profile, that corresponds to machine info + + Note that not many is collected from the ELK formats and it can vary greatly, + hence, most of the machine specification and environment should be in metadata instead. + + :param metadata: metadata extracted from the ELK profiles + :return: machine info extracted from the profiles + """ + machine_info = { + "architecture": metadata.get("machine.arch", "?"), + "system": metadata.get("machine.os", "?").capitalize(), + "release": metadata.get("extra.machine.platform", "?"), + "host": metadata.get("machine.hostname", "?"), + "cpu": { + "physical": "?", + "total": metadata.get("machine.cpu-cores", "?"), + "frequency": "?", + }, + "memory": { + "total_ram": metadata.get("machine.ram", "?"), + "swap": "?", + }, + } + + machine_info["boot_info"] = "?" 
+ machine_info["mem_details"] = {} + machine_info["cpu_details"] = [] + return machine_info + + +def import_elk_profile( + resources: list[dict[str, Any]], + metadata: dict[str, Any], + minor_version: MinorVersion, + save_to_index: bool = False, + **kwargs: Any, +) -> None: + """Constructs the profile for elk-stored data and saves them to jobs or index + + :param resources: list of parsed resources + :param metadata: parts of the profiles that will be stored as metadata in the profile + :param minor_version: minor version corresponding to the imported profiles + :param save_to_index: indication whether we should save the imported profiles to index + :param kwargs: rest of the paramters + """ + prof = Profile( + { + "global": { + "time": "???", + "resources": resources, + } + } + ) + prof.update({"origin": minor_version.checksum}) + prof.update({"metadata": metadata}) + prof.update({"machine": extract_machine_info_from_metadata(metadata)}) + prof.update( + { + "header": { + "type": "time", + "cmd": kwargs.get("cmd", ""), + "exitcode": "?", + "workload": kwargs.get("workload", ""), + "units": {"time": "sample"}, + } + } + ) + prof.update( + { + "collector_info": { + "name": "???", + "params": {}, + } + } + ) + prof.update({"postprocessors": []}) + + save_imported_profile(prof, save_to_index, minor_version) + + +def extract_from_elk( + elk_query: list[dict[str, Any]] +) -> tuple[list[dict[str, Any]], dict[str, Any]]: + """For the given elk query, extracts resources and metadata. + + For metadata we consider any key that has only single value through the profile, + and is not linked to keywords `metric` or `benchmarking`. 
+ For resources we consider anything that is not identified as metadata + + :param elk_query: query from the elk in form of list of resource + :return: list of resources and metadata + """ + res_counter = defaultdict(set) + for res in elk_query: + for key, val in res.items(): + res_counter[key].add(val) + metadata_keys = { + k + for (k, v) in res_counter.items() + if not k.startswith("metric") and not k.startswith("benchmarking") and len(v) == 1 + } + + metadata = {k: res_counter[k].pop() for k in metadata_keys} + resources = [ + { + k: common_kit.try_convert(v, [int, float, str]) + for k, v in res.items() + if k not in metadata_keys + } + for res in elk_query + ] + # We register uid + for res in resources: + res["uid"] = res["metric.name"] + res["benchmarking.time"] = res["benchmarking.end-ts"] - res["benchmarking.start-ts"] + res.pop("benchmarking.end-ts") + res.pop("benchmarking.start-ts") + return resources, metadata + + +@vcs_kit.lookup_minor_version +def import_elk_from_json( + imported: list[str], + minor_version: str, + **kwargs: Any, +) -> None: + """Imports the ELK stored data from the json data. + + The loading expects the json files to be in form of `{'queries': []}`. + + :param imported: list of filenames with elk data. 
+ :param minor_version: minor version corresponding to the imported profiles + :param kwargs: rest of the paramters + """ + minor_version_info = pcs.vcs().get_minor_version_info(minor_version) + + resources: list[dict[str, Any]] = [] + metadata: dict[str, Any] = {} + for imported_file in imported: + with open(imported_file, "r") as imported_handle: + imported_json = json.load(imported_handle) + assert ( + "queries" in imported_json.keys() + ), "expected the JSON to contain list of dictionaries in 'queries' key" + r, m = extract_from_elk(imported_json["queries"]) + resources.extend(r) + metadata.update(m) + log.minor_success(log.path_style(str(imported_file)), "imported") + import_elk_profile(resources, metadata, minor_version_info, **kwargs) diff --git a/perun/templates/diff_view_flamegraph.html.jinja2 b/perun/templates/diff_view_flamegraph.html.jinja2 index fc5c85bb..a3e3540f 100755 --- a/perun/templates/diff_view_flamegraph.html.jinja2 +++ b/perun/templates/diff_view_flamegraph.html.jinja2 @@ -103,6 +103,10 @@
 
{{ profile_overview.overview_table('toggleLeftProfileCollapse', 'left-profile-info', lhs_stats, rhs_stats, "Profile Stats") }}
 
+ {%- if rhs_metadata %} + {{ profile_overview.overview_table('toggleLeftMetadataCollapse', 'left-metadata-info', lhs_metadata, rhs_metadata, "Profile Metadata") }} +
 
+ {%- endif %}
@@ -111,6 +115,10 @@
 
{{ profile_overview.overview_table('toggleRightProfileCollapse', 'right-profile-info', rhs_stats, lhs_stats, "Profile Stats") }}
 
+ {%- if rhs_metadata %} + {{ profile_overview.overview_table('toggleRightMetadataCollapse', 'right-metadata-info', rhs_metadata, lhs_metadata, "Profile Metadata") }} +
 
+ {%- endif %}
@@ -167,8 +175,10 @@ diff --git a/perun/templates/diff_view_report.html.jinja2 b/perun/templates/diff_view_report.html.jinja2 index afb3266a..43f018ed 100755 --- a/perun/templates/diff_view_report.html.jinja2 +++ b/perun/templates/diff_view_report.html.jinja2 @@ -179,6 +179,10 @@
 
{{ profile_overview.overview_table('toggleLeftProfileCollapse', 'left-profile-info', lhs_stats, rhs_stats, "Profile Stats") }}
 
+ {%- if rhs_metadata %} + {{ profile_overview.overview_table('toggleLeftMetadataCollapse', 'left-metadata-info', lhs_metadata, rhs_metadata, "Profile Metadata") }} +
 
+ {%- endif %}
@@ -187,6 +191,10 @@
 
{{ profile_overview.overview_table('toggleRightProfileCollapse', 'right-profile-info', rhs_stats, lhs_stats, "Profile Stats") }}
 
+ {%- if rhs_metadata %} + {{ profile_overview.overview_table('toggleRightMetadataCollapse', 'right-metadata-info', rhs_metadata, lhs_metadata, "Profile Metadata") }} +
 
+ {%- endif %}
@@ -510,8 +518,10 @@ {{ profile_overview.toggle_script('toggleLeftCollapse', 'left-info') }} {{ profile_overview.toggle_script('toggleLeftProfileCollapse', 'left-profile-info') }} + {{ profile_overview.toggle_script('toggleLeftMetadataCollapse', 'left-metadata-info') }} {{ profile_overview.toggle_script('toggleRightCollapse', 'right-info') }} {{ profile_overview.toggle_script('toggleRightProfileCollapse', 'right-profile-info') }} + {{ profile_overview.toggle_script('toggleRightMetadataCollapse', 'right-metadata-info') }} {% for index in range(0, stat_list|length ) %} {{ widgets.toggle_help('toggleFlameHelp_' + index|string, 'flame-help-' + index|string) }} {% endfor %} @@ -1306,8 +1316,10 @@ toggleSankeyHelp(); toggleLeftCollapse(); toggleLeftProfileCollapse(); + toggleLeftMetadataCollapse(); toggleRightCollapse(); toggleRightProfileCollapse(); + toggleRightMetadataCollapse(); diff --git a/perun/utils/common/diff_kit.py b/perun/utils/common/diff_kit.py index 06f466d8..e3479a78 100755 --- a/perun/utils/common/diff_kit.py +++ b/perun/utils/common/diff_kit.py @@ -258,3 +258,21 @@ def generate_headers( lhs_header = generate_header(lhs_profile) rhs_header = generate_header(rhs_profile) return generate_diff_of_headers(lhs_header, rhs_header) + + +def generate_metadata( + lhs_profile: Profile, rhs_profile: Profile +) -> tuple[list[tuple[str, Any, str]], list[tuple[str, Any, str]]]: + """Generates metadata for lhs and rhs profile + + :param lhs_profile: profile for baseline + :param rhs_profile: profile for target + :return: pair of metadata for lhs (baseline) and rhs (target) + """ + lhs_metadata = sorted( + [(k, v, "") for k, v in lhs_profile.get("metadata", {}).items()], key=lambda x: x[0] + ) + rhs_metadata = sorted( + [(k, v, "") for k, v in rhs_profile.get("metadata", {}).items()], key=lambda x: x[0] + ) + return generate_diff_of_headers(lhs_metadata, rhs_metadata) diff --git a/perun/utils/mapping.py b/perun/utils/mapping.py index 4b777046..ae37b3ba 100755 --- 
a/perun/utils/mapping.py +++ b/perun/utils/mapping.py @@ -12,6 +12,7 @@ # Perun Imports from perun.logic import config +from perun.utils import log def get_readable_key(key: str) -> str: @@ -20,7 +21,7 @@ def get_readable_key(key: str) -> str: :param key: transformed key :return: human readable key """ - profiles = config.runtime().get("context.profiles") + profiles = config.runtime().safe_get("context.profiles", default=[]) if key == "amount": if all( p.get("collector_info", {}).get("name") == "kperf" @@ -36,6 +37,8 @@ def get_readable_key(key: str) -> str: return "Allocated Memory [B]" if key == "ncalls": return "Number of Calls [#]" + if key == "benchmarking.time": + return "Benchmarking Time [ms]" if key in ("I Mean", "I Max", "I Min"): return key.replace("I ", "Inclusive ") + " [ms]" if key in ("E Mean", "E Max", "E Min"): @@ -55,6 +58,8 @@ def from_readable_key(key: str) -> str: return "amount" if key == "Number of Calls [#]": return "ncalls" + if key == "Benchmarking Time [ms]": + return "benchmarking.time" if key in ("Inclusive Mean [ms]", "Inclusive Max [ms]", "Inclusive Min [ms]"): return key.replace("Inclusive ", "I ").replace(" [ms]", "") if key in ("Exclusive Mean [ms]", "Exclusive Max [ms]", "Exclusive Min [ms]"): @@ -70,4 +75,5 @@ def get_unit(key: str) -> str: if m := re.search(r"\[(?P[^]]+)\]", key): return m.group("unit") else: - assert False, f"Unsupported unit for '{key}'" + log.warn(f"Unregistered unit for '{key}'") + return "?" 
diff --git a/perun/view_diff/flamegraph/run.py b/perun/view_diff/flamegraph/run.py index d727cb46..bd98cc7e 100755 --- a/perun/view_diff/flamegraph/run.py +++ b/perun/view_diff/flamegraph/run.py @@ -254,6 +254,7 @@ def generate_flamegraph_difference( ), ) lhs_header, rhs_header = diff_kit.generate_headers(lhs_profile, rhs_profile) + lhs_meta, rhs_meta = diff_kit.generate_metadata(lhs_profile, rhs_profile) template = templates.get_template("diff_view_flamegraph.html.jinja2") content = template.render( @@ -262,11 +263,13 @@ def generate_flamegraph_difference( lhs_tag="Baseline (base)", lhs_top=table_run.get_top_n_records(lhs_profile, top_n=10, aggregated_key=data_type), lhs_stats=lhs_final_stats, + lhs_metadata=lhs_meta, lhs_uids=get_uids(lhs_profile), rhs_header=rhs_header, rhs_tag="Target (tgt)", rhs_top=table_run.get_top_n_records(rhs_profile, top_n=10, aggregated_key=data_type), rhs_stats=rhs_final_stats, + rhs_metadata=rhs_meta, rhs_uids=get_uids(rhs_profile), title="Differences of profiles (with flamegraphs)", data_types=data_types, diff --git a/perun/view_diff/report/run.py b/perun/view_diff/report/run.py index 28c8101b..e9165cb0 100755 --- a/perun/view_diff/report/run.py +++ b/perun/view_diff/report/run.py @@ -496,7 +496,7 @@ def process_traces( max_trace = 0 max_samples: dict[str, float] = defaultdict(float) for _, resource in log.progress(profile.all_resources(), description="Processing Traces"): - full_trace = [convert.to_uid(t, Config().minimize) for t in resource["trace"]] + full_trace = [convert.to_uid(t, Config().minimize) for t in resource.get("trace", {})] full_trace.append(convert.to_uid(resource["uid"], Config().minimize)) trace_len = len(full_trace) max_trace = max(max_trace, trace_len) @@ -743,6 +743,7 @@ def generate_report(lhs_profile: Profile, rhs_profile: Profile, **kwargs: Any) - Config().profile_stats["baseline"], Config().profile_stats["target"], ) + lhs_meta, rhs_meta = diff_kit.generate_metadata(lhs_profile, rhs_profile) env_filters = 
{"sanitize_variable_name": filters.sanitize_variable_name} template = templates.get_template("diff_view_report.html.jinja2", filters=env_filters) @@ -751,9 +752,11 @@ def generate_report(lhs_profile: Profile, rhs_profile: Profile, **kwargs: Any) - lhs_tag="Baseline (base)", lhs_header=lhs_header, lhs_stats=lhs_stats, + lhs_metadata=lhs_meta, rhs_tag="Target (tgt)", rhs_header=rhs_header, rhs_stats=rhs_stats, + rhs_metadata=rhs_meta, palette=WebColorPalette, callee_graph=graph.to_jinja_string("callees"), caller_graph=graph.to_jinja_string("callers"), diff --git a/tests/sources/imports/import-elk.json b/tests/sources/imports/import-elk.json new file mode 100755 index 00000000..ce5b8f2e --- /dev/null +++ b/tests/sources/imports/import-elk.json @@ -0,0 +1,58 @@ +{ + "queries": [ + { + "bench-suite": "dacapo", + "bench-suite-version": "9.12-MR1-git+2baec49", + "benchmark": "fop", + "benchmarking.end-ts": 1723120730, + "benchmarking.start-ts": 1723120721, + "machine.arch": "amd64", + "machine.cpu-clock": -1, + "machine.cpu-cores": 22, + "machine.cpu-family": "unknown", + "machine.hostname": "graalion", + "machine.ip": "", + "machine.name": "", + "machine.node": "", + "machine.os": "linux", + "machine.ram": -1, + "metric.better": "lower", + "metric.fork-number": 0, + "metric.iteration": 0, + "metric.name": "final-time", + "metric.score-function": "id", + "metric.score-value": 83, + "metric.type": "numeric", + "metric.unit": "ms", + "metric.uuid": "29ce6858-5583-11ef-b93e-3588b020d68d", + "metric.value": 83 + }, + { + "bench-suite": "dacapo", + "bench-suite-version": "9.12-MR1-git+2baec49", + "benchmark": "fop", + "benchmarking.end-ts": 1723120730, + "benchmarking.start-ts": 1723120721, + "machine.arch": "amd64", + "machine.cpu-clock": -1, + "machine.cpu-cores": 22, + "machine.cpu-family": "unknown", + "machine.hostname": "graalion", + "machine.ip": "", + "machine.name": "", + "machine.node": "", + "machine.os": "linux", + "machine.ram": -1, + "metric.better": "lower", + 
"metric.fork-number": 0, + "metric.iteration": 55, + "metric.name": "warmup", + "metric.score-function": "id", + "metric.score-value": 83, + "metric.type": "numeric", + "metric.unit": "ms", + "metric.uuid": "29ce6858-5583-11ef-b93e-3588b020d68d", + "metric.value": 83 + } + ] +} \ No newline at end of file diff --git a/tests/test_imports.py b/tests/test_imports.py index 791edda7..27c0b318 100755 --- a/tests/test_imports.py +++ b/tests/test_imports.py @@ -105,3 +105,15 @@ def test_imports(pcs_with_svs): ) assert result.exit_code == 1 assert len(os.listdir(os.path.join(".perun", "jobs"))) == 5 + + result = runner.invoke( + cli.cli, + [ + "import", + "elk", + "json", + os.path.join(pool_path, "import-elk.json"), + ], + ) + assert result.exit_code == 0 + assert len(os.listdir(os.path.join(".perun", "jobs"))) == 6 diff --git a/tests/test_utils.py b/tests/test_utils.py index 92a58a44..24df4925 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -308,8 +308,9 @@ def simple_generator(): raise Exception assert mapping.from_readable_key("Allocated Memory [B]") == "amount" - with pytest.raises(AssertionError): - mapping.get_unit("unsupported") + assert mapping.from_readable_key("Benchmarking Time [ms]") == "benchmarking.time" + assert mapping.get_readable_key("benchmarking.time") == "Benchmarking Time [ms]" + assert mapping.get_unit("unsupported") == "?" assert common_kit.hide_generics("std::vector>") == "std::vector<>" assert external_commands.is_executable("nonexisting") == False