Skip to content

Commit

Permalink
Fix problem ID and output in benchmark collection tests (#2570)
Browse files Browse the repository at this point in the history
* Reference value wasn't found for Fiedler_BMCSystBiol2016 because the problem was recently renamed (from Fiedler_BMC2016); the benchmark YAML still used the old ID
* Print all rows from gradient check results
  • Loading branch information
dweindl authored Oct 25, 2024
1 parent f5fa6cd commit ff6b3f1
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 10 deletions.
2 changes: 1 addition & 1 deletion tests/benchmark-models/benchmark_models.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ Elowitz_Nature2000:
t_adj: 0.11
note: benchmark collection reference value matches up to sign when applying log10-correction +sum(log(meas*log(10))) / 2

Fiedler_BMC2016:
Fiedler_BMCSystBiol2016:
llh: 58.58390161681
t_sim: 0.005
t_fwd: 0.05
Expand Down
29 changes: 20 additions & 9 deletions tests/benchmark-models/test_petab_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,11 +35,19 @@
from petab.v1.visualize import plot_problem


logger = get_logger(f"amici.{__name__}", logging.WARNING)
# Enable various debug output
debug = False

logger = get_logger(
f"amici.{__name__}", logging.DEBUG if debug else logging.INFO
)

script_dir = Path(__file__).parent.absolute()
repo_root = script_dir.parent.parent
benchmark_outdir = repo_root / "test_bmc"
debug_path = script_dir / "debug"
if debug:
debug_path.mkdir(exist_ok=True, parents=True)

# reference values for simulation times and log-likelihoods
references_yaml = script_dir / "benchmark_models.yaml"
Expand Down Expand Up @@ -225,12 +233,6 @@ class GradientCheckSettings:
)


debug = False
if debug:
debug_path = Path(__file__).parent / "debug"
debug_path.mkdir(exist_ok=True, parents=True)


@pytest.fixture(scope="session", params=problems, ids=problems)
def benchmark_problem(request):
"""Fixture providing model and PEtab problem for a problem from
Expand Down Expand Up @@ -570,9 +572,18 @@ def assert_gradient_check_success(
df["rtol_success"] = df["rel_diff"] <= rtol
max_adiff = df["abs_diff"].max()
max_rdiff = df["rel_diff"].max()
with pd.option_context("display.max_columns", None, "display.width", None):

success_fail = "succeeded" if check_result.success else "failed"
with pd.option_context(
"display.max_columns",
None,
"display.width",
None,
"display.max_rows",
None,
):
message = (
f"Gradient check failed:\n{df}\n\n"
f"Gradient check {success_fail}:\n{df}\n\n"
f"Maximum absolute difference: {max_adiff} (tolerance: {atol})\n"
f"Maximum relative difference: {max_rdiff} (tolerance: {rtol})"
)
Expand Down

0 comments on commit ff6b3f1

Please sign in to comment.