Benchmark model tests: reduce gradient check tolerances
Also sort test cases to avoid issues with pytest-xdist
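Context for the sorting change: pytest-xdist requires every worker process to collect the same test IDs in the same order, but iterating over a Python set is not deterministic across processes under hash randomization, so parametrizing directly over a set can abort a parallel run with an error like "Different tests were collected between gw0 and gw1". A minimal sketch of the failure mode and the fix, using hypothetical model names:

    import pytest

    MODELS = {"modelB", "modelA", "modelC"}  # set iteration order varies per process

    # sorted() gives every pytest-xdist worker the same, deterministic
    # parametrization order.
    @pytest.mark.parametrize("model", sorted(MODELS))
    def test_model(model):
        assert isinstance(model, str)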
dweindl committed Sep 30, 2024
1 parent 99a1636 commit d6c5c11
Showing 2 changed files with 182 additions and 82 deletions.
.github/workflows/test_benchmark_collection_models.yml (2 changes: 1 addition & 1 deletion)
@@ -68,7 +68,7 @@ jobs:
       - name: Run Gradient Checks
         run: |
           pip install git+https://github.com/ICB-DCM/fiddy.git \
-            && cd tests/benchmark-models && pytest ./test_petab_benchmark.py
+            && cd tests/benchmark-models && pytest --durations=10 ./test_petab_benchmark.py

       # upload results
       - uses: actions/upload-artifact@v4
tests/benchmark-models/test_petab_benchmark.py (262 changes: 181 additions & 81 deletions)
@@ -2,18 +2,22 @@

 from pathlib import Path
 
+import fiddy
+
 import amici
 import numpy as np
 import pandas as pd
 import petab.v1 as petab
 import pytest
 from amici.petab.petab_import import import_petab_problem
 import benchmark_models_petab
 
-
-# Absolute and relative tolerances for finite difference gradient checks.
-ATOL: float = 1e-3
-RTOL: float = 1e-2
+from collections import defaultdict
+from dataclasses import dataclass, field
+from amici import SensitivityMethod
+from fiddy import MethodId, get_derivative
+from fiddy.derivative_check import NumpyIsCloseDerivativeCheck
+from fiddy.extensions.amici import simulate_petab_to_cached_functions
+from fiddy.success import Consistency
 
 repo_root = Path(__file__).parent.parent.parent

@@ -30,30 +34,99 @@
"Beer_MolBioSystems2014",
"Alkan_SciSignal2018",
"Lang_PLOSComputBiol2024",
"Smith_BMCSystBiol2013",
# excluded due to excessive numerical failures
"Crauste_CellSystems2017",
"Fujita_SciSignal2010",
# FIXME: re-enable
"Raia_CancerResearch2011",
"Zheng_PNAS2012",
}
models = list(sorted(models))

debug = False
if debug:
debug_path = Path(__file__).parent / "debug"
debug_path.mkdir(exist_ok=True, parents=True)


# until fiddy is updated
@dataclass
class GradientCheckSettings:
# Absolute and relative tolerances for simulation
atol_sim: float = 1e-16
rtol_sim: float = 1e-12
# Absolute and relative tolerances for finite difference gradient checks.
atol_check: float = 1e-3
rtol_check: float = 1e-2
# Step sizes for finite difference gradient checks.
step_sizes = [
1e-1,
5e-2,
1e-2,
1e-3,
1e-4,
1e-5,
]
rng_seed: int = 0


settings = defaultdict(GradientCheckSettings)
settings["Smith_BMCSystBiol2013"] = GradientCheckSettings(
atol_sim=1e-10,
rtol_sim=1e-10,
)
settings["Oliveira_NatCommun2021"] = GradientCheckSettings(
atol_sim=1e-10,
rtol_sim=1e-10,
)
settings["Okuonghae_ChaosSolitonsFractals2020"] = GradientCheckSettings(
atol_sim=1e-14,
rtol_sim=1e-14,
)
settings["Okuonghae_ChaosSolitonsFractals2020"].step_sizes.insert(0, 0.2)
settings["Giordano_Nature2020"] = GradientCheckSettings(
atol_check=1e-6, rtol_check=1e-3, rng_seed=1
)


def assert_gradient_check_success(
derivative: fiddy.Derivative,
expected_derivative: np.array,
point: np.array,
atol: float,
rtol: float,
) -> None:
if not derivative.df.success.all():
raise AssertionError(
f"Failed to compute finite differences:\n{derivative.df}"
)
check = NumpyIsCloseDerivativeCheck(
derivative=derivative,
expectation=expected_derivative,
point=point,
)
check_result = check(rtol=rtol, atol=atol)

if check_result.success is True:
return

raise AssertionError(f"Gradient check failed:\n{check_result.df}")


@pytest.mark.filterwarnings(
"ignore:Importing amici.petab_objective is deprecated.:DeprecationWarning"
"ignore:divide by zero encountered in log",
# https://github.com/AMICI-dev/AMICI/issues/18
"ignore:Adjoint sensitivity analysis for models with discontinuous "
"right hand sides .*:UserWarning",
)
@pytest.mark.parametrize("scale", (True, False), ids=["scaled", "unscaled"])
@pytest.mark.parametrize(
"sensitivity_method",
(SensitivityMethod.forward, SensitivityMethod.adjoint),
ids=["forward", "adjoint"],
)
@pytest.mark.filterwarnings("ignore:divide by zero encountered in log10")
@pytest.mark.parametrize("scale", (True, False))
@pytest.mark.parametrize("model", models)
def test_benchmark_gradient(model, scale):
from fiddy import MethodId, get_derivative
from fiddy.derivative_check import NumpyIsCloseDerivativeCheck
from fiddy.extensions.amici import simulate_petab_to_cached_functions
from fiddy.success import Consistency

def test_benchmark_gradient(model, scale, sensitivity_method, request):
if not scale and model in (
"Smith_BMCSystBiol2013",
"Brannmark_JBC2010",
@@ -67,6 +140,28 @@ def test_benchmark_gradient(model, scale):
         # only fail on linear scale
         pytest.skip()
 
+    if (
+        model
+        in (
+            "Blasi_CellSystems2016",
+            "Oliveira_NatCommun2021",
+        )
+        and sensitivity_method == SensitivityMethod.adjoint
+    ):
+        # FIXME
+        pytest.skip()
+
+    if (
+        model
+        in (
+            "Weber_BMC2015",
+            "Sneyd_PNAS2002",
+        )
+        and sensitivity_method == SensitivityMethod.forward
+    ):
+        # FIXME
+        pytest.skip()
+
     petab_problem = benchmark_models_petab.get_problem(model)
     petab.flatten_timepoint_specific_output_overrides(petab_problem)
 
@@ -75,25 +170,18 @@ def test_benchmark_gradient(model, scale):
         petab_problem.x_free_ids
     ]
     parameter_ids = list(parameter_df_free.index)
+    cur_settings = settings[model]
 
     # Setup AMICI objects.
     amici_model = import_petab_problem(
         petab_problem,
         model_output_dir=benchmark_outdir / model,
     )
     amici_solver = amici_model.getSolver()
-    amici_solver.setAbsoluteTolerance(1e-12)
-    amici_solver.setRelativeTolerance(1e-12)
-    if model in (
-        "Smith_BMCSystBiol2013",
-        "Oliveira_NatCommun2021",
-    ):
-        amici_solver.setAbsoluteTolerance(1e-10)
-        amici_solver.setRelativeTolerance(1e-10)
-    elif model in ("Okuonghae_ChaosSolitonsFractals2020",):
-        amici_solver.setAbsoluteTolerance(1e-14)
-        amici_solver.setRelativeTolerance(1e-14)
+    amici_solver.setAbsoluteTolerance(cur_settings.atol_sim)
+    amici_solver.setRelativeTolerance(cur_settings.rtol_sim)
     amici_solver.setMaxSteps(int(1e5))
+    amici_solver.setSensitivityMethod(sensitivity_method)
 
     if model in ("Brannmark_JBC2010",):
         amici_model.setSteadyStateSensitivityMode(
@@ -107,78 +195,90 @@ def test_benchmark_gradient(model, scale):
         solver=amici_solver,
         scaled_parameters=scale,
         scaled_gradients=scale,
-        cache=not debug,
+        # FIXME: there is some issue with caching in fiddy
+        #  e.g. Elowitz_Nature2000-True fails with cache=True,
+        #  but not with cache=False
+        # cache=not debug,
+        cache=False,
     )
 
     noise_level = 0.1
+    np.random.seed(cur_settings.rng_seed)
 
-    np.random.seed(0)
-    if scale:
-        point = np.asarray(
-            list(
-                petab_problem.scale_parameters(
-                    dict(parameter_df_free.nominalValue)
-                ).values()
-            )
-        )
-        point_noise = np.random.randn(len(point)) * noise_level
-    else:
-        point = parameter_df_free.nominalValue.values
-        point_noise = np.random.randn(len(point)) * point * noise_level
-    point += point_noise  # avoid small gradients at nominal value
-
-    expected_derivative = amici_derivative(point)
-
-    sizes = [
-        1e-1,
-        1e-2,
-        1e-3,
-        1e-4,
-        1e-5,
-    ]
-    if model in ("Okuonghae_ChaosSolitonsFractals2020",):
-        sizes.insert(0, 0.2)
+    # find a point where the derivative can be computed
+    for _ in range(5):
+        if scale:
+            point = np.asarray(
+                list(
+                    petab_problem.scale_parameters(
+                        dict(parameter_df_free.nominalValue)
+                    ).values()
+                )
+            )
+            point_noise = np.random.randn(len(point)) * noise_level
+        else:
+            point = parameter_df_free.nominalValue.values
+            point_noise = np.random.randn(len(point)) * point * noise_level
+        point += point_noise  # avoid small gradients at nominal value
+
+        try:
+            expected_derivative = amici_derivative(point)
+            break
+        except RuntimeError as e:
+            print(e)
+            continue
+    else:
+        raise RuntimeError("Could not compute expected derivative.")
 
     derivative = get_derivative(
         function=amici_function,
         point=point,
-        sizes=sizes,
+        sizes=cur_settings.step_sizes,
         direction_ids=parameter_ids,
         method_ids=[MethodId.CENTRAL, MethodId.FORWARD, MethodId.BACKWARD],
-        success_checker=Consistency(atol=1e-4, rtol=0.2),
+        success_checker=Consistency(atol=1e-5, rtol=1e-1),
         expected_result=expected_derivative,
         relative_sizes=not scale,
    )
-    success = False
-    if derivative.df.success.all():
-        check = NumpyIsCloseDerivativeCheck(
-            derivative=derivative,
-            expectation=expected_derivative,
-            point=point,
-        )
-        success = check(rtol=RTOL, atol=ATOL)
 
     if debug:
-        df = pd.DataFrame(
-            [
-                {
-                    (
-                        "fd",
-                        r.metadata["size_absolute"],
-                        str(r.method_id),
-                    ): r.value
-                    for c in d.computers
-                    for r in c.results
-                }
-                for d in derivative.directional_derivatives
-            ],
-            index=parameter_ids,
-        )
-        df[("fd", "full", "")] = derivative.series.values
-        df[("amici", "", "")] = expected_derivative
-
-        file_name = f"{model}_scale={scale}.tsv"
-        df.to_csv(debug_path / file_name, sep="\t")
-
-    # The gradients for all parameters are correct.
-    assert success, derivative.df
+        write_debug_output(
+            debug_path / f"{request.node.callspec.id}.tsv",
+            derivative,
+            expected_derivative,
+            parameter_ids,
+        )
+
+    assert_gradient_check_success(
+        derivative,
+        expected_derivative,
+        point,
+        rtol=cur_settings.rtol_check,
+        atol=cur_settings.atol_check,
+    )
+
+
+def write_debug_output(
+    file_name, derivative, expected_derivative, parameter_ids
+):
+    df = pd.DataFrame(
+        [
+            {
+                (
+                    "fd",
+                    r.metadata["size_absolute"],
+                    str(r.method_id),
+                ): r.value
+                for c in d.computers
+                for r in c.results
+            }
+            for d in derivative.directional_derivatives
+        ],
+        index=parameter_ids,
+    )
+    df[("fd", "full", "")] = derivative.series.values
+    df[("amici", "", "")] = expected_derivative
+    df["abs_diff"] = np.abs(df[("fd", "full", "")] - df[("amici", "", "")])
+    df["rel_diff"] = df["abs_diff"] / np.abs(df[("amici", "", "")])
+
+    df.to_csv(file_name, sep="\t")
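Editor's aside, not fiddy's actual API: get_derivative combines central, forward, and backward finite differences at each configured step size, accepts a directional derivative only if the individual estimates are mutually consistent (the Consistency checker, tightened here to atol=1e-5, rtol=1e-1), and the accepted value is then compared against the AMICI gradient. The idea in plain numpy:

    import numpy as np


    def fd_estimates(f, x, h):
        central = (f(x + h) - f(x - h)) / (2 * h)  # cf. MethodId.CENTRAL, O(h^2) error
        forward = (f(x + h) - f(x)) / h            # cf. MethodId.FORWARD, O(h) error
        backward = (f(x) - f(x - h)) / h           # cf. MethodId.BACKWARD, O(h) error
        return central, forward, backward


    f = np.sin  # true derivative at x0 is cos(x0)
    x0 = 0.7
    estimates = [e for h in (1e-3, 1e-4, 1e-5) for e in fd_estimates(f, x0, h)]
    # Consistency-style acceptance: all estimates must agree within tolerance;
    # here they must also match the analytical derivative.
    assert np.allclose(estimates, np.cos(x0), atol=1e-5, rtol=1e-1)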
