From 56221c8c9f675710e4b8792d97717da4936092a7 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Sun, 25 Jun 2023 07:32:55 +0200
Subject: [PATCH 01/32] Ignore valgrind/numpy longdouble warning

---
 scripts/run-valgrind-py.sh | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/scripts/run-valgrind-py.sh b/scripts/run-valgrind-py.sh
index 45e565c756..510e27868a 100755
--- a/scripts/run-valgrind-py.sh
+++ b/scripts/run-valgrind-py.sh
@@ -24,4 +24,8 @@ PYTHONMALLOC=malloc valgrind \
     --leak-check=full \
     --gen-suppressions=all \
     -v \
-    python -m pytest -vv --ignore-glob=*petab*
+    python -m pytest -vv --ignore-glob=*petab* -W "ignore:Signature "
+# ^ ignores the following warning that occurs only under valgrind,
+# e.g. `valgrind python -c "import h5py"`:
+# UserWarning: Signature b'\x00\xd0\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf\x00\x00\x00\x00\x00\x00'
+# for <class 'numpy.longdouble'> does not match any known type: falling back to type probe function.

From 3a79b749d0ade40e7c294de3cef9da8139ec7f97 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Sun, 25 Jun 2023 06:31:54 +0200
Subject: [PATCH 02/32] v0.18.1 release notes

---
 CHANGELOG.md | 22 +++++++++++++++++++++-
 version.txt  |  2 +-
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1a3e07b657..0ab17965d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,7 +2,27 @@
 
 ## v0.X Series
 
+### v0.18.1 (2023-06-26)
+
+Fixes:
+* Fixed pysb pattern matching during PEtab import
+  by @FFroehlich in https://github.com/AMICI-dev/AMICI/pull/2118
+* Fixed `sp.Matrix` errors with `numpy==1.25`
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2124
+* Readme: added info containers
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2125
+* Fixed deprecation warnings
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2122
+  https://github.com/AMICI-dev/AMICI/pull/2131
+* Fixed logging typo in SBML import
+  by @dilpath in https://github.com/AMICI-dev/AMICI/pull/2126
+* Added minimum version for `pandas`
+  by @dilpath in https://github.com/AMICI-dev/AMICI/pull/2129
+
+**Full Changelog**: https://github.com/AMICI-dev/AMICI/compare/v0.18.0...v0.18.1
+
 ### v0.18.0 (2023-05-26)
+
 Features:
 * More efficient handling of splines in SBML models
   by @paulstapor, @lcontento, @dweindl
@@ -10,7 +30,7 @@ Features:
 * Partial support of current PEtab2.0 draft, including support
   for PySB models
   by @dweindl, @FFroehlich in https://github.com/AMICI-dev/AMICI/pull/1800
 
-Fixes
+Fixes:
 * **Fixed incorrect forward sensitivities for models with events with**
   **state-dependent trigger functions**
   by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2084
diff --git a/version.txt b/version.txt
index 66333910a4..249afd517d 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.18.0
+0.18.1

From b1b4b2b58d7fc848195108aa93d363f6b0dd85ef Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Mon, 26 Jun 2023 15:12:19 +0200
Subject: [PATCH 03/32] SBML import: Support for rateOf (#2120)

Adds support for SBML's `rateOf(.)` for most not-so-exotic cases.

We currently can't check for functions declared via the rateOf csymbol
(`<csymbol definitionURL="http://www.sbml.org/sbml/symbols/rateOf">`), and
only check for functions named `rateOf`. This should be safe as long as we
use the `SBMLFunctionDefinitionConverter` before.
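For illustration, a minimal usage sketch (the file name `model_with_rateof.xml`
is hypothetical; assumes the model uses `rateOf(.)` in, e.g., a rate rule):

    import amici

    # During import, rateOf(x) is substituted by the xdot expression of x
    # if x is a state variable, and by 0 for anything else.
    importer = amici.SbmlImporter("model_with_rateof.xml")
    importer.sbml2amici(model_name="model_with_rateof", output_dir="model_with_rateof")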
Tested in the following SBML semantic test suite cases: 01248,01249,01250,01251,01252,01253,01254,01255,01256,01257,01258,01259,01260,01261,01262,01263,01264,01265,01266,01267,01268,01269,01270,01293,01294,01295,01296,01297,01298,01299,01321,01322,01400,01401,01402,01403,01405,01406,01408,01409,01455,01456,01457,01458,01459,01460,01461,01462,01463,01482,01483,01525,01526,01527,01528,01529,01540,01541,01542,01543 Closes #769 --- documentation/python_interface.rst | 3 +- pytest.ini | 2 + python/sdist/amici/de_export.py | 90 ++++++++++++++++++- python/sdist/amici/import_utils.py | 7 +- python/sdist/amici/sbml_import.py | 59 +++++++++++- python/sdist/setup.cfg | 1 + .../test_sbml_import_special_functions.py | 60 ++++++++++++- 7 files changed, 210 insertions(+), 12 deletions(-) diff --git a/documentation/python_interface.rst b/documentation/python_interface.rst index a727fb3ea6..8ccc77e661 100644 --- a/documentation/python_interface.rst +++ b/documentation/python_interface.rst @@ -26,7 +26,7 @@ AMICI can import :term:`SBML` models via the Status of SBML support in Python-AMICI ++++++++++++++++++++++++++++++++++++++ -Python-AMICI currently **passes 1215 out of the 1821 (~67%) test cases** from +Python-AMICI currently **passes 1247 out of the 1821 (~68%) test cases** from the semantic `SBML Test Suite `_ (`current status `_). @@ -42,6 +42,7 @@ The following SBML test suite tags are currently supported * comp * Compartment * CSymbolAvogadro +* CSymbolRateOf * CSymbolTime * Deletion * EventNoDelay diff --git a/pytest.ini b/pytest.ini index cebd4ab2c2..3868c80b1e 100644 --- a/pytest.ini +++ b/pytest.ini @@ -17,5 +17,7 @@ filterwarnings = ignore:Model.initial_conditions will be removed in a future version. Instead, you can get a list of Initial objects with Model.initials.:DeprecationWarning:pysb\.core # https://github.com/pytest-dev/pytest-xdist/issues/825#issuecomment-1292283870 ignore:The --rsyncdir command line argument and rsyncdirs config variable are deprecated.:DeprecationWarning + ignore:.*:ImportWarning:tellurium + ignore:.*PyDevIPCompleter6.*:DeprecationWarning norecursedirs = .git amici_models build doc documentation matlab models ThirdParty amici sdist examples diff --git a/python/sdist/amici/de_export.py b/python/sdist/amici/de_export.py index e7d1750184..333f8f3dc2 100644 --- a/python/sdist/amici/de_export.py +++ b/python/sdist/amici/de_export.py @@ -1101,6 +1101,87 @@ def transform_dxdt_to_concentration(species_id, dxdt): for llh in si.symbols[SymbolId.LLHY].values() ) + self._process_sbml_rate_of(symbols)# substitute SBML-rateOf constructs + + def _process_sbml_rate_of(self, symbols) -> None: + """Substitute any SBML-rateOf constructs in the model equations""" + rate_of_func = sp.core.function.UndefinedFunction("rateOf") + species_sym_to_xdot = dict(zip(self.sym("x"), self.sym("xdot"))) + species_sym_to_idx = {x: i for i, x in enumerate(self.sym("x"))} + + def get_rate(symbol: sp.Symbol): + """Get rate of change of the given symbol""" + nonlocal symbols + + if symbol.find(rate_of_func): + raise SBMLException("Nesting rateOf() is not allowed.") + + # Replace all rateOf(some_species) by their respective xdot equation + with contextlib.suppress(KeyError): + return self._eqs["xdot"][species_sym_to_idx[symbol]] + + # For anything other than a state, rateOf(.) 
is 0 or invalid + return 0 + + # replace rateOf-instances in xdot by xdot symbols + for i_state in range(len(self.eq("xdot"))): + if rate_ofs := self._eqs["xdot"][i_state].find(rate_of_func): + self._eqs["xdot"][i_state] = self._eqs["xdot"][i_state].subs( + { + # either the rateOf argument is a state, or it's 0 + rate_of: species_sym_to_xdot.get(rate_of.args[0], 0) + for rate_of in rate_ofs + } + ) + # substitute in topological order + subs = toposort_symbols(dict(zip(self.sym("xdot"), self.eq("xdot")))) + self._eqs["xdot"] = smart_subs_dict(self.eq("xdot"), subs) + + # replace rateOf-instances in x0 by xdot equation + for i_state in range(len(self.eq("x0"))): + if rate_ofs := self._eqs["x0"][i_state].find(rate_of_func): + self._eqs["x0"][i_state] = self._eqs["x0"][i_state].subs( + {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs} + ) + + for component in chain(self.observables(), self.expressions(), self.events(), self._algebraic_equations): + if rate_ofs := component.get_val().find(rate_of_func): + if isinstance(component, Event): + # TODO froot(...) can currently not depend on `w`, so this substitution fails for non-zero rates + # see, e.g., sbml test case 01293 + raise SBMLException( + "AMICI does currently not support rateOf(.) inside event trigger functions." + ) + + if isinstance(component, AlgebraicEquation): + # TODO IDACalcIC fails with + # "The linesearch algorithm failed: step too small or too many backtracks." + # see, e.g., sbml test case 01482 + raise SBMLException( + "AMICI does currently not support rateOf(.) inside AlgebraicRules." + ) + + component.set_val( + component.get_val().subs( + {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs} + ) + ) + + for event in self.events(): + if event._state_update is None: + continue + + for i_state in range(len(event._state_update)): + if rate_ofs := event._state_update[i_state].find(rate_of_func): + raise SBMLException( + "AMICI does currently not support rateOf(.) inside event state updates." 
+ ) + # TODO here we need xdot sym, not eqs + # event._state_update[i_state] = event._state_update[i_state].subs( + # {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs} + # ) + + def add_component( self, component: ModelQuantity, insert_first: Optional[bool] = False ) -> None: @@ -2758,11 +2839,11 @@ def _generate_c_code(self) -> None: # only generate for those that have nontrivial implementation, # check for both basic variables (not in functions) and function # computed values - if ( + if (( name in self.functions and not self.functions[name].body and name not in nobody_functions - ) or (name not in self.functions and len(self.model.sym(name)) == 0): + ) or name not in self.functions) and len(self.model.sym(name)) == 0: continue self._write_index_files(name) @@ -2982,7 +3063,10 @@ def _write_function_file(self, function: str) -> None: else: iszero = len(self.model.sym(sym)) == 0 - if iszero: + if iszero and not ( + (sym == "y" and "Jy" in function) + or (sym == "w" and "xdot" in function and len(self.model.sym(sym))) + ): continue lines.append(f'#include "{sym}.h"') diff --git a/python/sdist/amici/import_utils.py b/python/sdist/amici/import_utils.py index 6c2e6b0e7e..953af3dd85 100644 --- a/python/sdist/amici/import_utils.py +++ b/python/sdist/amici/import_utils.py @@ -586,9 +586,10 @@ def _check_unsupported_functions( sp.core.function.UndefinedFunction, ) - if isinstance(sym.func, unsupported_functions) or isinstance( - sym, unsupported_functions - ): + if ( + isinstance(sym.func, unsupported_functions) + or isinstance(sym, unsupported_functions) + ) and getattr(sym.func, "name", "") != "rateOf": raise RuntimeError( f"Encountered unsupported expression " f'"{sym.func}" of type ' diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index 7abd0450a9..25b54f5c93 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -804,10 +804,20 @@ def _process_species_initial(self): if species: species["init"] = initial + # hide rateOf-arguments from toposort and the substitution below + all_rateof_dummies = [] + for species in self.symbols[SymbolId.SPECIES].values(): + species["init"], rateof_dummies = _rateof_to_dummy(species["init"]) + all_rateof_dummies.append(rateof_dummies) + # don't assign this since they need to stay in order sorted_species = toposort_symbols(self.symbols[SymbolId.SPECIES], "init") - for species in self.symbols[SymbolId.SPECIES].values(): - species["init"] = smart_subs_dict(species["init"], sorted_species, "init") + for species, rateof_dummies in zip(self.symbols[SymbolId.SPECIES].values(), all_rateof_dummies): + species["init"] = _dummy_to_rateof( + smart_subs_dict(species["init"], sorted_species, "init"), + rateof_dummies + ) + @log_execution_time("processing SBML rate rules", logger) def _process_rate_rules(self): @@ -990,6 +1000,18 @@ def _process_parameters(self, constant_parameters: List[str] = None) -> None: "value": par.getValue(), } + # Parameters that need to be turned into expressions + # so far, this concerns parameters with initial assignments containing rateOf(.) 
+ # (those have been skipped above) + for par in self.sbml.getListOfParameters(): + if (ia := self._get_element_initial_assignment(par.getId())) is not None \ + and ia.find(sp.core.function.UndefinedFunction("rateOf")): + self.symbols[SymbolId.EXPRESSION][_get_identifier_symbol(par)] = { + "name": par.getName() if par.isSetName() else par.getId(), + "value": ia, + } + + @log_execution_time("processing SBML reactions", logger) def _process_reactions(self): """ @@ -1774,16 +1796,19 @@ def _make_initial( :return: transformed expression """ - if not isinstance(sym_math, sp.Expr): return sym_math + sym_math, rateof_to_dummy = _rateof_to_dummy(sym_math) + for species_id, species in self.symbols[SymbolId.SPECIES].items(): if "init" in species: sym_math = smart_subs(sym_math, species_id, species["init"]) sym_math = smart_subs(sym_math, self._local_symbols["time"], sp.Float(0)) + sym_math = _dummy_to_rateof(sym_math, rateof_to_dummy) + return sym_math def process_conservation_laws(self, ode_model) -> None: @@ -2663,3 +2688,31 @@ def _non_const_conservation_laws_supported(sbml_model: sbml.Model) -> bool: return False return True + + +def _rateof_to_dummy(sym_math): + """Replace rateOf(...) by dummy variable + + if `rateOf(some_species)` is used in an initial assignment, we don't want to substitute the species argument + by its initial value. + + Usage: + sym_math, rateof_to_dummy = _rateof_to_dummy(sym_math) + [...substitute...] + sym_math = _dummy_to_rateof(sym_math, rateof_to_dummy) + """ + if rate_ofs := sym_math.find( + sp.core.function.UndefinedFunction("rateOf") + ): + # replace by dummies to avoid species substitution + rateof_dummies = {rate_of: sp.Dummy(f"Dummy_RateOf_{rate_of.args[0].name}") for rate_of in rate_ofs} + + return sym_math.subs(rateof_dummies), rateof_dummies + return sym_math, {} + + +def _dummy_to_rateof(sym_math, rateof_dummies): + """Back-substitution of dummies from `_rateof_to_dummy`""" + if rateof_dummies: + return sym_math.subs({v: k for k, v in rateof_dummies.items()}) + return sym_math diff --git a/python/sdist/setup.cfg b/python/sdist/setup.cfg index ecf0a74c0a..706c407652 100644 --- a/python/sdist/setup.cfg +++ b/python/sdist/setup.cfg @@ -52,6 +52,7 @@ test = pytest-rerunfailures coverage shyaml + tellurium vis = matplotlib seaborn diff --git a/python/tests/test_sbml_import_special_functions.py b/python/tests/test_sbml_import_special_functions.py index 9bcdb66bae..fa9d94579f 100644 --- a/python/tests/test_sbml_import_special_functions.py +++ b/python/tests/test_sbml_import_special_functions.py @@ -6,12 +6,14 @@ import os -import amici import numpy as np import pytest +from numpy.testing import assert_array_almost_equal_nulp, assert_approx_equal +from scipy.special import loggamma + +import amici from amici.gradient_check import check_derivatives from amici.testing import TemporaryDirectoryWinSafe, skip_on_valgrind -from scipy.special import loggamma @pytest.fixture(scope="session") @@ -153,3 +155,57 @@ def negative_binomial_nllh(m: np.ndarray, y: np.ndarray, p: float): - r * np.log(1 - p) - m * np.log(p) ) + +@pytest.mark.filterwarnings("ignore:the imp module is deprecated:DeprecationWarning") +def test_rateof(): + """Test chained rateOf to verify that model expressions are evaluated in the correct order.""" + import tellurium as te + + ant_model = """ + model test_chained_rateof + species S1, S2, S3, S4; + S1 = 0; + S3 = 0; + p2 = 1; + rate = 1; + S4 = 0.5 * rateOf(S3); + S2' = 2 * rateOf(S3); + S1' = S2 + rateOf(S2); + S3' = rate; + p1 = 2 * rateOf(S1); + p2' = 
rateOf(S1); + p3 = rateOf(rate); + end + """ + sbml_str = te.antimonyToSBML(ant_model) + sbml_importer = amici.SbmlImporter(sbml_str, from_file=False) + + module_name = "test_chained_rateof" + with TemporaryDirectoryWinSafe(prefix=module_name) as outdir: + sbml_importer.sbml2amici( + model_name=module_name, + output_dir=outdir, + ) + model_module = amici.import_model_module(module_name=module_name, module_path=outdir) + amici_model = model_module.getModel() + t = np.linspace(0, 10, 11) + amici_model.setTimepoints(t) + amici_solver = amici_model.getSolver() + rdata = amici.runAmiciSimulation(amici_model, amici_solver) + + state_ids_solver = amici_model.getStateIdsSolver() + i_S1 = state_ids_solver.index("S1") + i_S2 = state_ids_solver.index("S2") + i_S3 = state_ids_solver.index("S3") + i_p2 = state_ids_solver.index("p2") + assert_approx_equal(rdata["xdot"][i_S3], 1) + assert_approx_equal(rdata["xdot"][i_S2], 2) + assert_approx_equal(rdata["xdot"][i_S1], rdata.by_id("S2")[-1] + rdata["xdot"][i_S2]) + assert_approx_equal(rdata["xdot"][i_S1], rdata["xdot"][i_p2]) + + assert_array_almost_equal_nulp(rdata.by_id("S3"), t, 10) + assert_array_almost_equal_nulp(rdata.by_id("S2"), 2 * rdata.by_id("S3")) + assert_array_almost_equal_nulp(rdata.by_id("S4")[1:], 0.5 * np.diff(rdata.by_id("S3")), 10) + assert_array_almost_equal_nulp(rdata.by_id("p3"), 0) + assert_array_almost_equal_nulp(rdata.by_id("p2"), 1 + rdata.by_id("S1")) + From 29dce6ebb8597669a3a8a099972d6276be9d2bdc Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 26 Jun 2023 16:14:21 +0200 Subject: [PATCH 04/32] GHA: libncurses5 Required for tellurium (via roadrunner) --- .github/workflows/test_python_ver_matrix.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_python_ver_matrix.yml b/.github/workflows/test_python_ver_matrix.yml index 59dcf91041..e790ffdd79 100644 --- a/.github/workflows/test_python_ver_matrix.yml +++ b/.github/workflows/test_python_ver_matrix.yml @@ -49,7 +49,8 @@ jobs: swig \ libatlas-base-dev \ libhdf5-serial-dev \ - libboost-math-dev + libboost-math-dev \ + libncurses5 # install AMICI - name: Build BNGL From 6dc927ddc7fa75ce7b5f5b92900603b4d583f974 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Wed, 28 Jun 2023 11:11:13 +0200 Subject: [PATCH 05/32] GHA: libncurses5 --- .github/workflows/test_python_cplusplus.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_python_cplusplus.yml b/.github/workflows/test_python_cplusplus.yml index 8a4eaa3bb8..9fecd9825a 100644 --- a/.github/workflows/test_python_cplusplus.yml +++ b/.github/workflows/test_python_cplusplus.yml @@ -64,6 +64,7 @@ jobs: libboost-serialization-dev \ libboost-chrono-dev \ libhdf5-serial-dev \ + libncurses5 \ python3-venv \ swig \ lcov \ From 33550fb9ae95232e7a889137e77f7659898578c3 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 29 Jun 2023 11:20:07 +0200 Subject: [PATCH 06/32] Fix nbconvert installation check Fixes `Jupyter command \`jupyter-nbconvert\` not found.` --- scripts/runNotebook.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/runNotebook.sh b/scripts/runNotebook.sh index 4fa815a5b5..c0f246d7bb 100755 --- a/scripts/runNotebook.sh +++ b/scripts/runNotebook.sh @@ -26,7 +26,7 @@ if [ $# -eq 0 ]; then fi source ${AMICI_PATH}/build/venv/bin/activate -pip3 show ipython || (pip3 install --upgrade jupyter jupyter_contrib_nbextensions && python3 -m ipykernel install --user --name amici --display-name "Python (amici)") +pip3 show nbconvert || (pip3 
install --upgrade nbconvert && python3 -m ipykernel install --user --name amici --display-name "Python (amici)") for arg in "$@"; do if [ -d $arg ]; then From bb1730061b7d2a6763a341a02a2eec8ceebbe033 Mon Sep 17 00:00:00 2001 From: Polina Lakrisenko Date: Thu, 29 Jun 2023 13:52:28 +0200 Subject: [PATCH 07/32] Add steady-state computation modes (#2074) * add SteadyStateComputationMode * fix namig * add steadystate_computation_mode_ to model * fix steadystate_sensitivity_mode default * fix * correct processPreEquilibration comment * correct x_ss and sx_ss description * clangformat * Update model settings * enum * serialization * .. * update docstrings in test_preequilibration * add SteadyStateComputationMode test * test that results with different steady-state computation modes are close * add compatibility check for SteadyStateComputationMode * split assertions and use numpy.testing.assert_allclose * change 'assert np.isclose' to 'assert_allclose' in all test_preequilibration tests * fix default values in test_swig_interface * nbconvert * fix example_errors notebook * nbconvert * -- --------- Co-authored-by: Daniel Weindl Co-authored-by: Daniel Weindl --- include/amici/defines.h | 7 ++ include/amici/model.h | 25 ++++-- include/amici/rdata.h | 4 +- include/amici/serialization.h | 2 + python/examples/example_errors.ipynb | 3 +- python/sdist/amici/swig_wrappers.py | 17 +++- python/tests/test_preequilibration.py | 115 ++++++++++++++++++++------ python/tests/test_swig_interface.py | 6 +- src/hdf5.cpp | 8 ++ src/model.cpp | 9 ++ src/rdata.cpp | 2 +- src/steadystateproblem.cpp | 23 +++++- swig/amici.i | 1 + 13 files changed, 183 insertions(+), 39 deletions(-) diff --git a/include/amici/defines.h b/include/amici/defines.h index df959f48d5..068b8a9d54 100644 --- a/include/amici/defines.h +++ b/include/amici/defines.h @@ -186,6 +186,13 @@ enum class NonlinearSolverIteration { newton = 2 }; +/** Steady-state computation mode in steadyStateProblem */ +enum class SteadyStateComputationMode { + newtonOnly, + integrationOnly, + integrateIfNewtonFails +}; + /** Sensitivity computation mode in steadyStateProblem */ enum class SteadyStateSensitivityMode { newtonOnly, diff --git a/include/amici/model.h b/include/amici/model.h index 27f6b5a213..72f733e6cf 100644 --- a/include/amici/model.h +++ b/include/amici/model.h @@ -853,6 +853,20 @@ class Model : public AbstractModel, public ModelDimensions { */ void setUnscaledInitialStateSensitivities(std::vector const& sx0); + /** + * @brief Set the mode how steady state is computed in the steadystate + * simulation. + * @param mode Steadystate computation mode + */ + void setSteadyStateComputationMode(SteadyStateComputationMode mode); + + /** + * @brief Gets the mode how steady state is computed in the steadystate + * simulation. + * @return Mode + */ + SteadyStateComputationMode getSteadyStateComputationMode() const; + /** * @brief Set the mode how sensitivities are computed in the steadystate * simulation. 
@@ -1977,12 +1991,13 @@ class Model : public AbstractModel, public ModelDimensions { /** maximal number of events to track */ int nmaxevent_{10}; - /** - * flag indicating whether steadystate sensitivities are to be computed - * via FSA when steadyStateSimulation is used - */ + /** method for steady-state computation */ + SteadyStateComputationMode steadystate_computation_mode_{ + SteadyStateComputationMode::integrateIfNewtonFails}; + + /** method for steadystate sensitivities computation */ SteadyStateSensitivityMode steadystate_sensitivity_mode_{ - SteadyStateSensitivityMode::newtonOnly}; + SteadyStateSensitivityMode::integrateIfNewtonFails}; /** * Indicates whether the result of every call to `Model::f*` should be diff --git a/include/amici/rdata.h b/include/amici/rdata.h index 98c512d4fe..3b9ea01b9f 100644 --- a/include/amici/rdata.h +++ b/include/amici/rdata.h @@ -361,14 +361,14 @@ class ReturnData : public ModelDimensions { /** initial state (shape `nx`) */ std::vector x0; - /** preequilibration steady state found by Newton solver (shape `nx`) */ + /** preequilibration steady state (shape `nx`) */ std::vector x_ss; /** initial sensitivities (shape `nplist` x `nx`, row-major) */ std::vector sx0; /** - * preequilibration sensitivities found by Newton solver + * preequilibration sensitivities * (shape `nplist` x `nx`, row-major) */ std::vector sx_ss; diff --git a/include/amici/serialization.h b/include/amici/serialization.h index 2695e38980..7d0428f71f 100644 --- a/include/amici/serialization.h +++ b/include/amici/serialization.h @@ -144,6 +144,8 @@ void serialize(Archive& ar, amici::Model& m, unsigned int const /*version*/) { ar& m.pythonGenerated; ar& m.min_sigma_; ar& m.sigma_res_; + ar& m.steadystate_computation_mode_; + ar& m.steadystate_sensitivity_mode_; } /** diff --git a/python/examples/example_errors.ipynb b/python/examples/example_errors.ipynb index 31325d4dd7..988afb98a3 100644 --- a/python/examples/example_errors.ipynb +++ b/python/examples/example_errors.ipynb @@ -640,6 +640,7 @@ "amici_solver = amici_model.getSolver()\n", "amici_solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n", "amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n", + "amici_model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n", "\n", "np.random.seed(2020)\n", "problem_parameters = dict(\n", @@ -960,7 +961,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/python/sdist/amici/swig_wrappers.py b/python/sdist/amici/swig_wrappers.py index 65798cd6ac..a732fa7d79 100644 --- a/python/sdist/amici/swig_wrappers.py +++ b/python/sdist/amici/swig_wrappers.py @@ -181,7 +181,11 @@ def runAmiciSimulations( with _capture_cstdout(): edata_ptr_vector = amici_swig.ExpDataPtrVector(edata_list) rdata_ptr_list = amici_swig.runAmiciSimulations( - _get_ptr(solver), edata_ptr_vector, _get_ptr(model), failfast, num_threads + _get_ptr(solver), + edata_ptr_vector, + _get_ptr(model), + failfast, + num_threads, ) for rdata in rdata_ptr_list: _log_simulation(rdata) @@ -240,6 +244,7 @@ def writeSolverSettingsToHDF5( "ReinitializationStateIdxs", "ReinitializeFixedParameterInitialStates", "StateIsNonNegative", + "SteadyStateComputationMode", "SteadyStateSensitivityMode", ("t0", "setT0"), "Timepoints", @@ -307,7 +312,9 @@ def _log_simulation(rdata: amici_swig.ReturnData): ) -def _ids_and_names_to_rdata(rdata: amici_swig.ReturnData, model: amici_swig.Model): +def 
_ids_and_names_to_rdata( + rdata: amici_swig.ReturnData, model: amici_swig.Model +): """Copy entity IDs and names from a Model to ReturnData.""" for entity_type in ( "State", @@ -318,6 +325,10 @@ def _ids_and_names_to_rdata(rdata: amici_swig.ReturnData, model: amici_swig.Mode ): for name_or_id in ("Ids", "Names"): names_or_ids = getattr(model, f"get{entity_type}{name_or_id}")() - setattr(rdata, f"{entity_type.lower()}_{name_or_id.lower()}", names_or_ids) + setattr( + rdata, + f"{entity_type.lower()}_{name_or_id.lower()}", + names_or_ids, + ) rdata.state_ids_solver = model.getStateIdsSolver() rdata.state_names_solver = model.getStateNamesSolver() diff --git a/python/tests/test_preequilibration.py b/python/tests/test_preequilibration.py index 8fc0defb76..72fc61636c 100644 --- a/python/tests/test_preequilibration.py +++ b/python/tests/test_preequilibration.py @@ -1,4 +1,4 @@ -"""Tests for preequilibration""" +"""Tests for pre- and post-equilibration""" import itertools @@ -70,7 +70,8 @@ def preeq_fixture(pysb_example_presimulation_module): [1, 1, 1], ] - return (model, solver, edata, edata_preeq, edata_presim, edata_sim, pscales, plists) + return (model, solver, edata, edata_preeq, edata_presim, edata_sim, + pscales, plists) def test_manual_preequilibration(preeq_fixture): @@ -158,12 +159,13 @@ def test_parameter_reordering(preeq_fixture): rdata_reordered = amici.runAmiciSimulation(model, solver, edata) for ip, p_index in enumerate(plist): - assert np.isclose( + assert_allclose( rdata_ordered["sx"][:, p_index, :], rdata_reordered["sx"][:, ip, :], - 1e-6, - 1e-6, - ).all(), plist + atol=1e-6, + rtol=1e-6, + err_msg=str(dict(variable="sx", plist=plist, p_index=p_index)) + ) def test_data_replicates(preeq_fixture): @@ -268,9 +270,12 @@ def test_parameter_in_expdata(preeq_fixture): rdata_edata = amici.runAmiciSimulation(model, solver, edata) for variable in ["x", "sx"]: - assert np.isclose( - rdata[variable][0, :], rdata_edata[variable][0, :], 1e-6, 1e-6 - ).all(), variable + assert_allclose( + rdata[variable][0, :], rdata_edata[variable][0, :], + atol=1e-6, + rtol=1e-6, + err_msg=str(dict(variable=variable)) + ) def test_raise_presimulation_with_adjoints(preeq_fixture): @@ -363,13 +368,17 @@ def test_equilibration_methods_with_adjoints(preeq_fixture): for setting1, setting2 in itertools.product(settings, settings): # assert correctness of result for variable in ["llh", "sllh"]: - assert np.isclose( - rdatas[setting1][variable], rdatas[setting2][variable], 1e-6, 1e-6 - ).all(), variable + assert_allclose( + rdatas[setting1][variable], rdatas[setting2][variable], + atol=1e-6, + rtol=1e-6, + err_msg=str(dict(variable=variable, setting1=setting1, + setting2=setting2)) + ) def test_newton_solver_equilibration(preeq_fixture): - """Test data replicates""" + """Test newton solver for equilibration""" ( model, @@ -419,13 +428,17 @@ def test_newton_solver_equilibration(preeq_fixture): # assert correct results for variable in ["llh", "sllh", "sx0", "sx_ss", "x_ss"]: - assert np.isclose( - rdatas[settings[0]][variable], rdatas[settings[1]][variable], 1e-5, 1e-5 - ).all(), variable + assert_allclose( + rdatas[settings[0]][variable], + rdatas[settings[1]][variable], + atol=1e-5, + rtol=1e-5, + err_msg=str(dict(variable=variable)) + ) def test_newton_steadystate_check(preeq_fixture): - """Test data replicates""" + """Test NewtonStepSteadyStateCheck solver flag""" ( model, @@ -450,13 +463,14 @@ def test_newton_steadystate_check(preeq_fixture): edata.setObservedData(np.hstack([y, y[0]])) 
edata.setObservedDataStdDev(np.hstack([stdy, stdy[0]])) + # set sensi method + sensi_meth = amici.SensitivityMethod.forward + solver.setSensitivityMethod(sensi_meth) + solver.setNewtonMaxSteps(100) rdatas = {} for newton_check in [True, False]: - # set sensi method - sensi_meth = amici.SensitivityMethod.forward - solver.setSensitivityMethod(sensi_meth) solver.setNewtonStepSteadyStateCheck(newton_check) # add rdatas @@ -467,9 +481,64 @@ def test_newton_steadystate_check(preeq_fixture): # assert correct results for variable in ["llh", "sllh", "sx0", "sx_ss", "x_ss"]: - assert np.isclose( - rdatas[True][variable], rdatas[False][variable], 1e-6, 1e-6 - ).all(), variable + assert_allclose( + rdatas[True][variable], + rdatas[False][variable], + atol=1e-6, + rtol=1e-6, + err_msg=str(dict(variable=variable, sensi_meth=sensi_meth)) + ) + + +def test_steadystate_computation_mode(preeq_fixture): + """Test newtonOnly and integrationOnly steady-state computation modes""" + ( + model, + solver, + edata, + edata_preeq, + edata_presim, + edata_sim, + pscales, + plists, + ) = preeq_fixture + + sensi_meth = amici.SensitivityMethod.forward + solver.setSensitivityOrder(amici.SensitivityOrder.first) + solver.setSensitivityMethodPreequilibration(sensi_meth) + solver.setNewtonMaxSteps(10) + + rdatas = {} + stst_computation_modes = [ + amici.SteadyStateComputationMode.integrationOnly, + amici.SteadyStateComputationMode.newtonOnly, + ] + for mode in stst_computation_modes: + model.setSteadyStateComputationMode(mode) + rdatas[mode] = amici.runAmiciSimulation(model, solver, edata) + + # assert successful simulation + assert rdatas[mode]["status"] == amici.AMICI_SUCCESS + + assert np.all(rdatas[amici.SteadyStateComputationMode.integrationOnly][ + 'preeq_status'][0] == [0, 1, 0]) + assert rdatas[amici.SteadyStateComputationMode.integrationOnly][ + 'preeq_numsteps'][0][0] == 0 + + assert np.all(rdatas[amici.SteadyStateComputationMode.newtonOnly][ + 'preeq_status'][0] == [1, 0, 0]) + assert rdatas[amici.SteadyStateComputationMode.newtonOnly][ + 'preeq_numsteps'][0][0] > 0 + + # assert correct results + for variable in ["llh", "sllh", "sx0", "sx_ss", "x_ss"]: + assert_allclose( + rdatas[stst_computation_modes[0]][variable], + rdatas[stst_computation_modes[1]][variable], + atol=1e-5, + rtol=1e-5, + err_msg=str(dict(variable=variable, sensi_meth=sensi_meth)) + ) def test_simulation_errors(preeq_fixture): diff --git a/python/tests/test_swig_interface.py b/python/tests/test_swig_interface.py index c2ae631030..09cc4c78af 100644 --- a/python/tests/test_swig_interface.py +++ b/python/tests/test_swig_interface.py @@ -98,8 +98,12 @@ def test_copy_constructors(pysb_example_presimulation_module): # Skipped due to conservation laws in the test model # `pysb_example_presimulation_module.getModel()`. 
"StateIsNonNegative": None, + "SteadyStateComputationMode": [ + 2, + 1, + ], "SteadyStateSensitivityMode": [ - 0, + 2, 1, ], ("t0", "setT0"): [ diff --git a/src/hdf5.cpp b/src/hdf5.cpp index 6489297ac7..ffc7390053 100644 --- a/src/hdf5.cpp +++ b/src/hdf5.cpp @@ -1169,6 +1169,14 @@ void readModelDataFromHDF5( ); } + if (attributeExists(file, datasetPath, "steadyStateComputationMode")) { + model.setSteadyStateComputationMode( + static_cast(getIntScalarAttribute( + file, datasetPath, "steadyStateComputationMode" + )) + ); + } + if (attributeExists(file, datasetPath, "steadyStateSensitivityMode")) { model.setSteadyStateSensitivityMode( static_cast(getIntScalarAttribute( diff --git a/src/model.cpp b/src/model.cpp index 2aa8ee72ee..017b9cf871 100644 --- a/src/model.cpp +++ b/src/model.cpp @@ -993,6 +993,15 @@ void Model::setUnscaledInitialStateSensitivities( sx0data_ = sx0; } +void Model::setSteadyStateComputationMode(const SteadyStateComputationMode mode +) { + steadystate_computation_mode_ = mode; +} + +SteadyStateComputationMode Model::getSteadyStateComputationMode() const { + return steadystate_computation_mode_; +} + void Model::setSteadyStateSensitivityMode(const SteadyStateSensitivityMode mode ) { steadystate_sensitivity_mode_ = mode; diff --git a/src/rdata.cpp b/src/rdata.cpp index 5acce5d6f0..56fc0023c0 100644 --- a/src/rdata.cpp +++ b/src/rdata.cpp @@ -206,7 +206,7 @@ void ReturnData::processPreEquilibration( for (int ip = 0; ip < nplist; ip++) writeSlice(sx_rdata_[ip], slice(sx_ss, ip, nx)); } - /* Get cpu time for Newton solve in milliseconds */ + /* Get cpu time for pre-equilibration in milliseconds */ preeq_cpu_time = preeq.getCPUTime(); preeq_cpu_timeB = preeq.getCPUTimeB(); preeq_numstepsB = preeq.getNumStepsB(); diff --git a/src/steadystateproblem.cpp b/src/steadystateproblem.cpp index c561e6a8c3..e0b98b7a91 100644 --- a/src/steadystateproblem.cpp +++ b/src/steadystateproblem.cpp @@ -58,6 +58,17 @@ SteadystateProblem::SteadystateProblem(Solver const& solver, Model const& model) throw AmiException("Preequilibration using adjoint sensitivities " "is not compatible with using forward " "sensitivities during simulation"); + if (solver.getSensitivityMethod() == SensitivityMethod::forward + && model.getSteadyStateComputationMode() + == SteadyStateComputationMode::newtonOnly + && model.getSteadyStateSensitivityMode() + == SteadyStateSensitivityMode::integrationOnly) + throw AmiException("For forward sensitivity analysis steady-state " + "computation mode 'newtonOnly' and steady-state " + "sensitivity mode 'integrationOnly' are not " + "compatible as numerical integration of the model " + "ODEs and corresponding forward sensitivities ODEs " + "is coupled"); } void SteadystateProblem::workSteadyStateProblem( @@ -104,7 +115,8 @@ void SteadystateProblem::findSteadyState( Solver const& solver, Model& model, int it ) { steady_state_status_.resize(3, SteadyStateStatus::not_run); - /* Turn off Newton's method if newton_maxsteps is set to 0 or + /* Turn off Newton's method if 'integrationOnly' approach is chosen for + steady-state computation or newton_maxsteps is set to 0 or if 'integrationOnly' approach is chosen for sensitivity computation in combination with forward sensitivities approach. The latter is necessary as numerical integration of the model ODEs and corresponding @@ -112,7 +124,9 @@ void SteadystateProblem::findSteadyState( chosen for sensitivity computation it is enforced that steady state is computed only by numerical integration as well. 
*/ bool turnOffNewton - = solver.getNewtonMaxSteps() == 0 + = model.getSteadyStateComputationMode() + == SteadyStateComputationMode::integrationOnly + || solver.getNewtonMaxSteps() == 0 || (model.getSteadyStateSensitivityMode() == SteadyStateSensitivityMode::integrationOnly && ((it == -1 @@ -121,12 +135,15 @@ void SteadystateProblem::findSteadyState( || solver.getSensitivityMethod() == SensitivityMethod::forward )); + bool turnOffSimulation = model.getSteadyStateComputationMode() + == SteadyStateComputationMode::newtonOnly; + /* First, try to run the Newton solver */ if (!turnOffNewton) findSteadyStateByNewtonsMethod(model, false); /* Newton solver didn't work, so try to simulate to steady state */ - if (!checkSteadyStateSuccess()) + if (!turnOffSimulation && !checkSteadyStateSuccess()) findSteadyStateBySimulation(solver, model, it); /* Simulation didn't work, retry the Newton solver from last sim state. */ diff --git a/swig/amici.i b/swig/amici.i index 0015ca1bd0..9eac8e5046 100644 --- a/swig/amici.i +++ b/swig/amici.i @@ -312,6 +312,7 @@ InternalSensitivityMethod = enum('InternalSensitivityMethod') InterpolationType = enum('InterpolationType') LinearMultistepMethod = enum('LinearMultistepMethod') NonlinearSolverIteration = enum('NonlinearSolverIteration') +SteadyStateComputationMode = enum('SteadyStateComputationMode') SteadyStateSensitivityMode = enum('SteadyStateSensitivityMode') SteadyStateStatus = enum('SteadyStateStatus') NewtonDampingFactorMode = enum('NewtonDampingFactorMode') From b8edbcd3b111d1c9a675452c623032558b254706 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 3 Jul 2023 07:39:36 +0200 Subject: [PATCH 08/32] black --- python/sdist/amici/de_export.py | 38 +++++++++------ python/sdist/amici/petab_util.py | 12 ++--- python/sdist/amici/sbml_import.py | 36 +++++++------- python/sdist/amici/swig_wrappers.py | 11 ++--- python/tests/test_preequilibration.py | 47 +++++++++++-------- .../test_sbml_import_special_functions.py | 21 +++++---- 6 files changed, 91 insertions(+), 74 deletions(-) diff --git a/python/sdist/amici/de_export.py b/python/sdist/amici/de_export.py index 333f8f3dc2..96e25861ab 100644 --- a/python/sdist/amici/de_export.py +++ b/python/sdist/amici/de_export.py @@ -51,9 +51,9 @@ from .cxxcodeprinter import AmiciCxxCodePrinter, get_switch_statement from .de_model import * from .import_utils import ( - amici_time_symbol, ObservableTransformation, SBMLException, + amici_time_symbol, generate_flux_symbol, smart_subs_dict, strip_pysb, @@ -1101,7 +1101,7 @@ def transform_dxdt_to_concentration(species_id, dxdt): for llh in si.symbols[SymbolId.LLHY].values() ) - self._process_sbml_rate_of(symbols)# substitute SBML-rateOf constructs + self._process_sbml_rate_of(symbols) # substitute SBML-rateOf constructs def _process_sbml_rate_of(self, symbols) -> None: """Substitute any SBML-rateOf constructs in the model equations""" @@ -1144,7 +1144,12 @@ def get_rate(symbol: sp.Symbol): {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs} ) - for component in chain(self.observables(), self.expressions(), self.events(), self._algebraic_equations): + for component in chain( + self.observables(), + self.expressions(), + self.events(), + self._algebraic_equations, + ): if rate_ofs := component.get_val().find(rate_of_func): if isinstance(component, Event): # TODO froot(...) 
can currently not depend on `w`, so this substitution fails for non-zero rates @@ -1181,7 +1186,6 @@ def get_rate(symbol: sp.Symbol): # {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs} # ) - def add_component( self, component: ModelQuantity, insert_first: Optional[bool] = False ) -> None: @@ -2084,8 +2088,9 @@ def _compute_equation(self, name: str) -> None: # need to check if equations are zero since we are using # symbols - if not smart_is_zero_matrix(self.eq("stau")[ie]) \ - and not smart_is_zero_matrix(self.eq("xdot")): + if not smart_is_zero_matrix( + self.eq("stau")[ie] + ) and not smart_is_zero_matrix(self.eq("xdot")): tmp_eq += smart_multiply( self.sym("xdot_old") - self.sym("xdot"), self.sym("stau").T, @@ -2108,7 +2113,9 @@ def _compute_equation(self, name: str) -> None: ) # additional part of chain rule state variables - tmp_dxdp += smart_multiply(self.sym("xdot_old"), self.sym("stau").T) + tmp_dxdp += smart_multiply( + self.sym("xdot_old"), self.sym("stau").T + ) # finish chain rule for the state variables tmp_eq += smart_multiply(self.eq("ddeltaxdx")[ie], tmp_dxdp) @@ -2839,11 +2846,14 @@ def _generate_c_code(self) -> None: # only generate for those that have nontrivial implementation, # check for both basic variables (not in functions) and function # computed values - if (( - name in self.functions - and not self.functions[name].body - and name not in nobody_functions - ) or name not in self.functions) and len(self.model.sym(name)) == 0: + if ( + ( + name in self.functions + and not self.functions[name].body + and name not in nobody_functions + ) + or name not in self.functions + ) and len(self.model.sym(name)) == 0: continue self._write_index_files(name) @@ -3064,8 +3074,8 @@ def _write_function_file(self, function: str) -> None: iszero = len(self.model.sym(sym)) == 0 if iszero and not ( - (sym == "y" and "Jy" in function) - or (sym == "w" and "xdot" in function and len(self.model.sym(sym))) + (sym == "y" and "Jy" in function) + or (sym == "w" and "xdot" in function and len(self.model.sym(sym))) ): continue diff --git a/python/sdist/amici/petab_util.py b/python/sdist/amici/petab_util.py index 9108b108bc..a9666d84ac 100644 --- a/python/sdist/amici/petab_util.py +++ b/python/sdist/amici/petab_util.py @@ -27,9 +27,7 @@ def get_states_in_condition_table( raise NotImplementedError() species_check_funs = { - MODEL_TYPE_SBML: lambda x: _element_is_sbml_state( - petab_problem.sbml_model, x - ), + MODEL_TYPE_SBML: lambda x: _element_is_sbml_state(petab_problem.sbml_model, x), MODEL_TYPE_PYSB: lambda x: _element_is_pysb_pattern( petab_problem.model.model, x ), @@ -38,9 +36,7 @@ def get_states_in_condition_table( resolve_mapping(petab_problem.mapping_df, col): (None, None) if condition is None else ( - petab_problem.condition_df.loc[ - condition[SIMULATION_CONDITION_ID], col - ], + petab_problem.condition_df.loc[condition[SIMULATION_CONDITION_ID], col], petab_problem.condition_df.loc[ condition[PREEQUILIBRATION_CONDITION_ID], col ] @@ -64,9 +60,7 @@ def get_states_in_condition_table( pysb.bng.generate_equations(petab_problem.model.model) try: - spm = pysb.pattern.SpeciesPatternMatcher( - model=petab_problem.model.model - ) + spm = pysb.pattern.SpeciesPatternMatcher(model=petab_problem.model.model) except NotImplementedError as e: raise NotImplementedError( "Requires https://github.com/pysb/pysb/pull/570. 
" diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index 25b54f5c93..852ff7e769 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -812,13 +812,13 @@ def _process_species_initial(self): # don't assign this since they need to stay in order sorted_species = toposort_symbols(self.symbols[SymbolId.SPECIES], "init") - for species, rateof_dummies in zip(self.symbols[SymbolId.SPECIES].values(), all_rateof_dummies): + for species, rateof_dummies in zip( + self.symbols[SymbolId.SPECIES].values(), all_rateof_dummies + ): species["init"] = _dummy_to_rateof( - smart_subs_dict(species["init"], sorted_species, "init"), - rateof_dummies + smart_subs_dict(species["init"], sorted_species, "init"), rateof_dummies ) - @log_execution_time("processing SBML rate rules", logger) def _process_rate_rules(self): """ @@ -1004,14 +1004,14 @@ def _process_parameters(self, constant_parameters: List[str] = None) -> None: # so far, this concerns parameters with initial assignments containing rateOf(.) # (those have been skipped above) for par in self.sbml.getListOfParameters(): - if (ia := self._get_element_initial_assignment(par.getId())) is not None \ - and ia.find(sp.core.function.UndefinedFunction("rateOf")): + if ( + ia := self._get_element_initial_assignment(par.getId()) + ) is not None and ia.find(sp.core.function.UndefinedFunction("rateOf")): self.symbols[SymbolId.EXPRESSION][_get_identifier_symbol(par)] = { "name": par.getName() if par.isSetName() else par.getId(), "value": ia, } - @log_execution_time("processing SBML reactions", logger) def _process_reactions(self): """ @@ -2562,11 +2562,14 @@ def _get_list_of_species_references( ListOfSpeciesReferences """ return [ - reference - for reaction in sbml_model.getListOfReactions() - for reference in - itt.chain(reaction.getListOfReactants(), reaction.getListOfProducts(), reaction.getListOfModifiers()) - ] + reference + for reaction in sbml_model.getListOfReactions() + for reference in itt.chain( + reaction.getListOfReactants(), + reaction.getListOfProducts(), + reaction.getListOfModifiers(), + ) + ] def replace_logx(math_str: Union[str, float, None]) -> Union[str, float, None]: @@ -2701,11 +2704,12 @@ def _rateof_to_dummy(sym_math): [...substitute...] 
sym_math = _dummy_to_rateof(sym_math, rateof_to_dummy) """ - if rate_ofs := sym_math.find( - sp.core.function.UndefinedFunction("rateOf") - ): + if rate_ofs := sym_math.find(sp.core.function.UndefinedFunction("rateOf")): # replace by dummies to avoid species substitution - rateof_dummies = {rate_of: sp.Dummy(f"Dummy_RateOf_{rate_of.args[0].name}") for rate_of in rate_ofs} + rateof_dummies = { + rate_of: sp.Dummy(f"Dummy_RateOf_{rate_of.args[0].name}") + for rate_of in rate_ofs + } return sym_math.subs(rateof_dummies), rateof_dummies return sym_math, {} diff --git a/python/sdist/amici/swig_wrappers.py b/python/sdist/amici/swig_wrappers.py index a732fa7d79..50e78daf39 100644 --- a/python/sdist/amici/swig_wrappers.py +++ b/python/sdist/amici/swig_wrappers.py @@ -1,7 +1,6 @@ """Convenience wrappers for the swig interface""" import logging import sys - import warnings from contextlib import contextmanager, suppress from typing import Any, Dict, List, Optional, Sequence, Union @@ -106,8 +105,7 @@ def runAmiciSimulation( """ if ( model.ne > 0 - and solver.getSensitivityMethod() - == amici_swig.SensitivityMethod.adjoint + and solver.getSensitivityMethod() == amici_swig.SensitivityMethod.adjoint and solver.getSensitivityOrder() == amici_swig.SensitivityOrder.first ): warnings.warn( @@ -168,8 +166,7 @@ def runAmiciSimulations( """ if ( model.ne > 0 - and solver.getSensitivityMethod() - == amici_swig.SensitivityMethod.adjoint + and solver.getSensitivityMethod() == amici_swig.SensitivityMethod.adjoint and solver.getSensitivityOrder() == amici_swig.SensitivityOrder.first ): warnings.warn( @@ -312,9 +309,7 @@ def _log_simulation(rdata: amici_swig.ReturnData): ) -def _ids_and_names_to_rdata( - rdata: amici_swig.ReturnData, model: amici_swig.Model -): +def _ids_and_names_to_rdata(rdata: amici_swig.ReturnData, model: amici_swig.Model): """Copy entity IDs and names from a Model to ReturnData.""" for entity_type in ( "State", diff --git a/python/tests/test_preequilibration.py b/python/tests/test_preequilibration.py index 72fc61636c..d797c4bf3b 100644 --- a/python/tests/test_preequilibration.py +++ b/python/tests/test_preequilibration.py @@ -70,8 +70,7 @@ def preeq_fixture(pysb_example_presimulation_module): [1, 1, 1], ] - return (model, solver, edata, edata_preeq, edata_presim, edata_sim, - pscales, plists) + return (model, solver, edata, edata_preeq, edata_presim, edata_sim, pscales, plists) def test_manual_preequilibration(preeq_fixture): @@ -164,7 +163,7 @@ def test_parameter_reordering(preeq_fixture): rdata_reordered["sx"][:, ip, :], atol=1e-6, rtol=1e-6, - err_msg=str(dict(variable="sx", plist=plist, p_index=p_index)) + err_msg=str(dict(variable="sx", plist=plist, p_index=p_index)), ) @@ -271,10 +270,11 @@ def test_parameter_in_expdata(preeq_fixture): rdata_edata = amici.runAmiciSimulation(model, solver, edata) for variable in ["x", "sx"]: assert_allclose( - rdata[variable][0, :], rdata_edata[variable][0, :], + rdata[variable][0, :], + rdata_edata[variable][0, :], atol=1e-6, rtol=1e-6, - err_msg=str(dict(variable=variable)) + err_msg=str(dict(variable=variable)), ) @@ -369,11 +369,13 @@ def test_equilibration_methods_with_adjoints(preeq_fixture): # assert correctness of result for variable in ["llh", "sllh"]: assert_allclose( - rdatas[setting1][variable], rdatas[setting2][variable], + rdatas[setting1][variable], + rdatas[setting2][variable], atol=1e-6, rtol=1e-6, - err_msg=str(dict(variable=variable, setting1=setting1, - setting2=setting2)) + err_msg=str( + dict(variable=variable, setting1=setting1, 
setting2=setting2) + ), ) @@ -433,7 +435,7 @@ def test_newton_solver_equilibration(preeq_fixture): rdatas[settings[1]][variable], atol=1e-5, rtol=1e-5, - err_msg=str(dict(variable=variable)) + err_msg=str(dict(variable=variable)), ) @@ -486,7 +488,7 @@ def test_newton_steadystate_check(preeq_fixture): rdatas[False][variable], atol=1e-6, rtol=1e-6, - err_msg=str(dict(variable=variable, sensi_meth=sensi_meth)) + err_msg=str(dict(variable=variable, sensi_meth=sensi_meth)), ) @@ -520,15 +522,22 @@ def test_steadystate_computation_mode(preeq_fixture): # assert successful simulation assert rdatas[mode]["status"] == amici.AMICI_SUCCESS - assert np.all(rdatas[amici.SteadyStateComputationMode.integrationOnly][ - 'preeq_status'][0] == [0, 1, 0]) - assert rdatas[amici.SteadyStateComputationMode.integrationOnly][ - 'preeq_numsteps'][0][0] == 0 + assert np.all( + rdatas[amici.SteadyStateComputationMode.integrationOnly]["preeq_status"][0] + == [0, 1, 0] + ) + assert ( + rdatas[amici.SteadyStateComputationMode.integrationOnly]["preeq_numsteps"][0][0] + == 0 + ) - assert np.all(rdatas[amici.SteadyStateComputationMode.newtonOnly][ - 'preeq_status'][0] == [1, 0, 0]) - assert rdatas[amici.SteadyStateComputationMode.newtonOnly][ - 'preeq_numsteps'][0][0] > 0 + assert np.all( + rdatas[amici.SteadyStateComputationMode.newtonOnly]["preeq_status"][0] + == [1, 0, 0] + ) + assert ( + rdatas[amici.SteadyStateComputationMode.newtonOnly]["preeq_numsteps"][0][0] > 0 + ) # assert correct results for variable in ["llh", "sllh", "sx0", "sx_ss", "x_ss"]: @@ -537,7 +546,7 @@ def test_steadystate_computation_mode(preeq_fixture): rdatas[stst_computation_modes[1]][variable], atol=1e-5, rtol=1e-5, - err_msg=str(dict(variable=variable, sensi_meth=sensi_meth)) + err_msg=str(dict(variable=variable, sensi_meth=sensi_meth)), ) diff --git a/python/tests/test_sbml_import_special_functions.py b/python/tests/test_sbml_import_special_functions.py index fa9d94579f..bd8d071109 100644 --- a/python/tests/test_sbml_import_special_functions.py +++ b/python/tests/test_sbml_import_special_functions.py @@ -6,14 +6,13 @@ import os +import amici import numpy as np import pytest -from numpy.testing import assert_array_almost_equal_nulp, assert_approx_equal -from scipy.special import loggamma - -import amici from amici.gradient_check import check_derivatives from amici.testing import TemporaryDirectoryWinSafe, skip_on_valgrind +from numpy.testing import assert_approx_equal, assert_array_almost_equal_nulp +from scipy.special import loggamma @pytest.fixture(scope="session") @@ -156,6 +155,7 @@ def negative_binomial_nllh(m: np.ndarray, y: np.ndarray, p: float): - m * np.log(p) ) + @pytest.mark.filterwarnings("ignore:the imp module is deprecated:DeprecationWarning") def test_rateof(): """Test chained rateOf to verify that model expressions are evaluated in the correct order.""" @@ -186,7 +186,9 @@ def test_rateof(): model_name=module_name, output_dir=outdir, ) - model_module = amici.import_model_module(module_name=module_name, module_path=outdir) + model_module = amici.import_model_module( + module_name=module_name, module_path=outdir + ) amici_model = model_module.getModel() t = np.linspace(0, 10, 11) amici_model.setTimepoints(t) @@ -200,12 +202,15 @@ def test_rateof(): i_p2 = state_ids_solver.index("p2") assert_approx_equal(rdata["xdot"][i_S3], 1) assert_approx_equal(rdata["xdot"][i_S2], 2) - assert_approx_equal(rdata["xdot"][i_S1], rdata.by_id("S2")[-1] + rdata["xdot"][i_S2]) + assert_approx_equal( + rdata["xdot"][i_S1], rdata.by_id("S2")[-1] + 
rdata["xdot"][i_S2]
+    )
     assert_approx_equal(rdata["xdot"][i_S1], rdata["xdot"][i_p2])
 
     assert_array_almost_equal_nulp(rdata.by_id("S3"), t, 10)
     assert_array_almost_equal_nulp(rdata.by_id("S2"), 2 * rdata.by_id("S3"))
-    assert_array_almost_equal_nulp(rdata.by_id("S4")[1:], 0.5 * np.diff(rdata.by_id("S3")), 10)
+    assert_array_almost_equal_nulp(
+        rdata.by_id("S4")[1:], 0.5 * np.diff(rdata.by_id("S3")), 10
+    )
     assert_array_almost_equal_nulp(rdata.by_id("p3"), 0)
     assert_array_almost_equal_nulp(rdata.by_id("p2"), 1 + rdata.by_id("S1"))
-

From 752b0e58de6829fbc974915742f187006a56fb5b Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Tue, 4 Jul 2023 11:57:12 +0200
Subject: [PATCH 09/32] SBML import: Allow hardcoding of numerical values (#2134)

Allows selecting parameters whose values are to be hard-coded (#1192).

So far, restricted to parameters that aren't targets of rules or initial
assignments. This can be extended later: lifting those restrictions on
parameters, allowing hard-coding of Species with constant=True, ...
---
 python/sdist/amici/sbml_import.py | 76 ++++++++++++++++++++++++-----
 python/tests/test_sbml_import.py  | 33 +++++++++++++-
 2 files changed, 97 insertions(+), 12 deletions(-)

diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py
index 852ff7e769..7a3f76f657 100644
--- a/python/sdist/amici/sbml_import.py
+++ b/python/sdist/amici/sbml_import.py
@@ -13,7 +13,18 @@ import warnings
 import xml.etree.ElementTree as ET
 from pathlib import Path
-from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Union,
+)
 
 import libsbml as sbml
 import numpy as np
@@ -281,6 +292,7 @@ def sbml2amici(
         cache_simplify: bool = False,
         log_as_log10: bool = True,
         generate_sensitivity_code: bool = True,
+        hardcode_symbols: Sequence[str] = None,
     ) -> None:
         """
         Generate and compile AMICI C++ files for the model provided to the
@@ -385,6 +397,12 @@ def sbml2amici(
         :param generate_sensitivity_code:
             If ``False``, the code required for sensitivity computation will
             not be generated
+
+        :param hardcode_symbols:
+            List of SBML entity IDs that are to be hardcoded in the generated model.
+            Their values cannot be changed anymore after model import.
+            Currently only parameters that are not targets of rules or
+            initial assignments are supported.
         """
         set_log_level(logger, verbose)
@@ -401,6 +419,7 @@ def sbml2amici(
             simplify=simplify,
             cache_simplify=cache_simplify,
             log_as_log10=log_as_log10,
+            hardcode_symbols=hardcode_symbols,
         )
 
         exporter = DEExporter(
@@ -437,6 +456,7 @@ def _build_ode_model(
         simplify: Optional[Callable] = _default_simplify,
         cache_simplify: bool = False,
         log_as_log10: bool = True,
+        hardcode_symbols: Sequence[str] = None,
     ) -> DEModel:
         """Generate an ODEModel from this SBML model.
@@ -444,6 +464,13 @@ def _build_ode_model(
         """
         constant_parameters = list(constant_parameters) if constant_parameters else []
 
+        hardcode_symbols = set(hardcode_symbols) if hardcode_symbols else {}
+        if invalid := (set(constant_parameters) & set(hardcode_symbols)):
+            raise ValueError(
+                "The following parameters were selected as both constant "
+                f"and hard-coded which is not allowed: {invalid}"
+            )
+
         if sigmas is None:
             sigmas = {}
 
@@ -460,7 +487,9 @@ def _build_ode_model(
         self.sbml_parser_settings.setParseLog(
             sbml.L3P_PARSE_LOG_AS_LOG10 if log_as_log10 else sbml.L3P_PARSE_LOG_AS_LN
         )
-        self._process_sbml(constant_parameters)
+        self._process_sbml(
+            constant_parameters=constant_parameters, hardcode_symbols=hardcode_symbols
+        )
 
         if (
             self.symbols.get(SymbolId.EVENT, False)
@@ -496,18 +525,26 @@ def _build_ode_model(
         return ode_model
 
     @log_execution_time("importing SBML", logger)
-    def _process_sbml(self, constant_parameters: List[str] = None) -> None:
+    def _process_sbml(
+        self,
+        constant_parameters: List[str] = None,
+        hardcode_symbols: Sequence[str] = None,
+    ) -> None:
         """
         Read parameters, species, reactions, and so on from SBML model
 
         :param constant_parameters:
             SBML Ids identifying constant parameters
+        :param hardcode_symbols:
+            Parameter IDs to be replaced by their values in the generated model.
         """
         if not self._discard_annotations:
             self._process_annotations()
         self.check_support()
-        self._gather_locals()
-        self._process_parameters(constant_parameters)
+        self._gather_locals(hardcode_symbols=hardcode_symbols)
+        self._process_parameters(
+            constant_parameters=constant_parameters, hardcode_symbols=hardcode_symbols
+        )
         self._process_compartments()
         self._process_species()
         self._process_reactions()
@@ -639,7 +676,7 @@ def check_event_support(self) -> None:
         )
 
     @log_execution_time("gathering local SBML symbols", logger)
-    def _gather_locals(self) -> None:
+    def _gather_locals(self, hardcode_symbols: Sequence[str] = None) -> None:
         """
         Populate self.local_symbols with all model entities.
 
@@ -647,10 +684,10 @@ def _gather_locals(self) -> None:
         shadowing model entities as well as to avoid possibly costly
         symbolic substitutions
         """
-        self._gather_base_locals()
+        self._gather_base_locals(hardcode_symbols=hardcode_symbols)
         self._gather_dependent_locals()
 
-    def _gather_base_locals(self):
+    def _gather_base_locals(self, hardcode_symbols: Sequence[str] = None) -> None:
         """
         Populate self.local_symbols with pure symbol definitions that do not
         depend on any other symbol.
@@ -677,8 +714,20 @@ def _gather_base_locals(self):
         ):
             if not c.isSetId():
                 continue
-
-            self.add_local_symbol(c.getId(), _get_identifier_symbol(c))
+            if c.getId() in hardcode_symbols:
+                if c.getConstant() is not True:
+                    # disallow anything that can be changed by rules/reactions/events
+                    raise ValueError(
+                        f"Cannot hardcode non-constant symbol `{c.getId()}`."
+                    )
+                if self.sbml.getInitialAssignment(c.getId()):
+                    raise NotImplementedError(
+                        f"Cannot hardcode symbol `{c.getId()}` "
+                        "that is an initial assignment target."
+ ) + self.add_local_symbol(c.getId(), sp.Float(c.getValue())) + else: + self.add_local_symbol(c.getId(), _get_identifier_symbol(c)) for x_ref in _get_list_of_species_references(self.sbml): if not x_ref.isSetId(): @@ -940,7 +989,11 @@ def _process_annotations(self) -> None: self.sbml.removeParameter(parameter_id) @log_execution_time("processing SBML parameters", logger) - def _process_parameters(self, constant_parameters: List[str] = None) -> None: + def _process_parameters( + self, + constant_parameters: List[str] = None, + hardcode_symbols: Sequence[str] = None, + ) -> None: """ Get parameter information from SBML model. @@ -983,6 +1036,7 @@ def _process_parameters(self, constant_parameters: List[str] = None) -> None: if parameter.getId() not in constant_parameters and self._get_element_initial_assignment(parameter.getId()) is None and not self.is_assignment_rule_target(parameter) + and parameter.getId() not in hardcode_symbols ] loop_settings = { diff --git a/python/tests/test_sbml_import.py b/python/tests/test_sbml_import.py index d0ce9cae5c..41ccdd925c 100644 --- a/python/tests/test_sbml_import.py +++ b/python/tests/test_sbml_import.py @@ -38,7 +38,7 @@ def simple_sbml_model(): model.addSpecies(s1) p1 = model.createParameter() p1.setId("p1") - p1.setValue(0.0) + p1.setValue(2.0) model.addParameter(p1) return document, model @@ -662,3 +662,34 @@ def test_code_gen_uses_lhs_symbol_ids(): ) dwdx = Path(tmpdir, "dwdx.cpp").read_text() assert "dobservable_x1_dx1 = " in dwdx + + +def test_hardcode_parameters(simple_sbml_model): + """Test model generation works for model without observables""" + sbml_doc, sbml_model = simple_sbml_model + sbml_importer = SbmlImporter(sbml_source=sbml_model, from_file=False) + r = sbml_model.createRateRule() + r.setVariable("S1") + r.setFormula("p1") + assert sbml_model.getParameter("p1").getValue() != 0 + + ode_model = sbml_importer._build_ode_model() + assert str(ode_model.parameters()) == "[p1]" + assert ode_model.differential_states()[0].get_dt().name == "p1" + + ode_model = sbml_importer._build_ode_model( + constant_parameters=[], + hardcode_symbols=["p1"], + ) + assert str(ode_model.parameters()) == "[]" + assert ( + ode_model.differential_states()[0].get_dt() + == sbml_model.getParameter("p1").getValue() + ) + + with pytest.raises(ValueError): + sbml_importer._build_ode_model( + # mutually exclusive + constant_parameters=["p1"], + hardcode_symbols=["p1"], + ) From e7b06433640ddd5161d039b2f493db8af9fdb2df Mon Sep 17 00:00:00 2001 From: Polina Lakrisenko Date: Thu, 6 Jul 2023 09:18:54 +0200 Subject: [PATCH 10/32] prevent the second run of newton's method in case simulation was turned off (#2137) --- src/steadystateproblem.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/steadystateproblem.cpp b/src/steadystateproblem.cpp index e0b98b7a91..816d0a4c53 100644 --- a/src/steadystateproblem.cpp +++ b/src/steadystateproblem.cpp @@ -147,7 +147,7 @@ void SteadystateProblem::findSteadyState( findSteadyStateBySimulation(solver, model, it); /* Simulation didn't work, retry the Newton solver from last sim state. 
*/ - if (!turnOffNewton && !checkSteadyStateSuccess()) + if (!turnOffNewton && !turnOffSimulation && !checkSteadyStateSuccess()) findSteadyStateByNewtonsMethod(model, true); /* Nothing worked, throw an as informative error as possible */ From a42ede278516a03d931b04f85fccd7d7f4e40d27 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 6 Jul 2023 11:17:53 +0200 Subject: [PATCH 11/32] Fix steadystate sensitivity mode for performance test (#2135) * More output * newtonOnly --- tests/performance/test.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/performance/test.py b/tests/performance/test.py index 596cdca2f7..13a640e3e1 100755 --- a/tests/performance/test.py +++ b/tests/performance/test.py @@ -36,12 +36,19 @@ def check_results(rdata): "numerrtestfailsB", "numnonlinsolvconvfails", "numnonlinsolvconvfailsB", + "preeq_status", + "preeq_numsteps", + "preeq_numstepsB", "preeq_cpu_time", "preeq_cpu_timeB", + "cpu_time_total", "cpu_time", "cpu_timeB", + "posteq_status", "posteq_cpu_time", "posteq_cpu_timeB", + "posteq_numsteps", + "posteq_numstepsB", ] for d in diagnostics: print(d, rdata[d]) @@ -114,6 +121,7 @@ def prepare_simulation(arg, model, solver, edata): model.setParameters([0.1 for _ in tmp_par]) solver.setSensitivityMethod(amici.SensitivityMethod.forward) solver.setSensitivityOrder(amici.SensitivityOrder.first) + model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly) edata.setTimepoints([float("inf")]) elif arg == "adjoint_steadystate_sensitivities_non_optimal_parameters": tmp_par = model.getParameters() From 9f9a6ab2d75cfd2b8f5629f66b1dfea56bb6d7ee Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 6 Jul 2023 12:29:10 +0200 Subject: [PATCH 12/32] Increase maxSteps for ExampleSplinesSwameye2003.ipynb (#2136) Should avoid most `mxstep steps taken before reaching tout` failures (e.g. 
https://github.com/AMICI-dev/AMICI/actions/runs/5453350972/jobs/9922075162) --- .../example_splines_swameye/ExampleSplinesSwameye2003.ipynb | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb index 6ef92d4c49..8846974330 100644 --- a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb +++ b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb @@ -351,7 +351,10 @@ "outputs": [], "source": [ "# Import PEtab problem into pyPESTO\n", - "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()" + "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()\n", + "\n", + "# Increase maximum number of steps for AMICI\n", + "pypesto_problem.objective.amici_solver.setMaxSteps(10**5)" ] }, { From 3d7bbc4503868b3fced3b5ec0206720872c16ca6 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 6 Jul 2023 14:57:20 +0200 Subject: [PATCH 13/32] Doc: Update reference list (#2138) Closes #2040 --- documentation/amici_refs.bib | 41 ++++++++++++++++++++++++++++++++++++ documentation/background.rst | 4 ++++ documentation/references.md | 23 +++++++++++++++++++- 3 files changed, 67 insertions(+), 1 deletion(-) diff --git a/documentation/amici_refs.bib b/documentation/amici_refs.bib index a5f85f7a10..34c2cf1ba1 100644 --- a/documentation/amici_refs.bib +++ b/documentation/amici_refs.bib @@ -1144,6 +1144,47 @@ @Article{ErdemMut2022 url = {https://doi.org/10.1038/s41467-022-31138-1}, } +@Article{ContentoSta2023, + author = {Lorenzo Contento and Paul Stapor and Daniel Weindl and Jan Hasenauer}, + journal = {bioRxiv}, + title = {A more expressive spline representation for {SBML} models improves code generation performance in {AMICI}}, + year = {2023}, + abstract = {Spline interpolants are commonly used for discretizing and estimating functions in mathematical models. While splines can be encoded in the Systems Biology Markup Language (SBML) using piecewise functions, the resulting formulas are very complex and difficult to derive by hand. Tools to create such formulas exist but only deal with numeric data and thus cannot be used for function estimation. Similarly, simulation tools suffer from several limitations when handling splines. For example, in the AMICI library splines with large numbers of nodes lead to long model import times. We have developed a set of SBML annotations to mark assignment rules as spline formulas. These compact representations are human-readable and easy to edit, in contrast to the piecewise representation. Different boundary conditions and extrapolation methods can also be specified. By extending AMICI to create and recognize these annotations, model import can be sped up significantly. This allows practitioners to increase the expressivity of their models. 
While the performance improvement is limited to AMICI, our tools for creating spline formulas can be used for other tools as well and our syntax for compact spline representation may be a starting point for an SBML-native way to represent spline interpolants.Competing Interest StatementThe authors have declared no competing interest.}, + creationdate = {2023-07-06T10:25:17}, + doi = {10.1101/2023.06.29.547120}, + elocation-id = {2023.06.29.547120}, + eprint = {https://www.biorxiv.org/content/early/2023/07/01/2023.06.29.547120.full.pdf}, + modificationdate = {2023-07-06T10:25:30}, + publisher = {Cold Spring Harbor Laboratory}, + url = {https://www.biorxiv.org/content/early/2023/07/01/2023.06.29.547120}, +} + +@InBook{Froehlich2023, + author = {Fr{\"o}hlich, Fabian}, + editor = {Nguyen, Lan K.}, + pages = {59--86}, + publisher = {Springer US}, + title = {A Practical Guide for the Efficient Formulation and Calibration of Large, Energy- and Rule-Based Models of Cellular Signal Transduction}, + year = {2023}, + address = {New York, NY}, + isbn = {978-1-0716-3008-2}, + abstract = {Aberrant signal transduction leads to complex diseases such as cancer. To rationally design treatment strategies with small molecule inhibitors, computational models have to be employed. Energy- and rule-based models allow the construction of mechanistic ordinary differential equation models based on structural insights. The detailed, energy-based description often generates large models, which are difficult to calibrate on experimental data. In this chapter, we provide a detailed, interactive protocol for the programmatic formulation and calibration of such large, energy- and rule-based models of cellular signal transduction based on an example model describing the action of RAF inhibitors on MAPK signaling. An interactive version of this chapter is available as Jupyter Notebook at github.com/FFroehlich/energy{\_}modeling{\_}chapter.}, + booktitle = {Computational Modeling of Signaling Networks}, + creationdate = {2023-07-06T10:31:07}, + doi = {10.1007/978-1-0716-3008-2_3}, + modificationdate = {2023-07-06T10:36:35}, + url = {https://doi.org/10.1007/978-1-0716-3008-2_3}, +} + +@Misc{SluijsZho2023, + author = {Bob van Sluijs and Tao Zhou and Britta Helwig and Mathieu Baltussen and Frank Nelissen and Hans Heus and Wilhelm Huck}, + title = {Inverse Design of Enzymatic Reaction Network States}, + year = {2023}, + creationdate = {2023-07-06T10:39:46}, + doi = {10.21203/rs.3.rs-2646906/v1}, + modificationdate = {2023-07-06T10:40:37}, +} + @Comment{jabref-meta: databaseType:bibtex;} @Comment{jabref-meta: grouping: diff --git a/documentation/background.rst b/documentation/background.rst index 6c0dd9005b..2a2e748672 100644 --- a/documentation/background.rst +++ b/documentation/background.rst @@ -37,6 +37,10 @@ publications: of Biochemical Reaction Networks.** *bioRxiv* 2022.08.08.503176. DOI: `10.1101/2022.08.08.503176 `_. +* L. Contento, P. Stapor, D. Weindl, and J. Hasenauer, "A more expressive spline + representation for SBML models improves code generation performance in AMICI," + bioRxiv, 2023, DOI: `10.1101/2023.06.29.547120 `_. + .. note:: Implementation details of the latest AMICI versions may differ from the ones diff --git a/documentation/references.md b/documentation/references.md index 2cdadde654..d0fec0f88b 100644 --- a/documentation/references.md +++ b/documentation/references.md @@ -1,6 +1,6 @@ # References -List of publications using AMICI. Total number is 74. +List of publications using AMICI. 
Total number is 77. If you applied AMICI in your work and your publication is missing, please let us know via a new GitHub issue. @@ -22,6 +22,21 @@ Reveals Time-Dependent Test Efficiency and Infectious Contacts.” Epidemics 43: 100681. https://doi.org/10.1016/j.epidem.2023.100681. +
+
+Contento, Lorenzo, Paul Stapor, Daniel Weindl, and Jan Hasenauer. 2023.
+“A More Expressive Spline Representation for SBML
+Models Improves Code Generation Performance in
+AMICI.” bioRxiv. https://doi.org/10.1101/2023.06.29.547120.
+
+
+
+Fröhlich, Fabian. 2023. “A Practical Guide for the Efficient
+Formulation and Calibration of Large, Energy- and Rule-Based Models of
+Cellular Signal Transduction.” In Computational Modeling of
+Signaling Networks, edited by Lan K. Nguyen, 59–86. New York, NY:
+Springer US. https://doi.org/10.1007/978-1-0716-3008-2_3.
+
Fröhlich, Fabian, Luca Gerosa, Jeremy Muhlich, and Peter K Sorger. 2023. “Mechanistic Model of MAPK Signaling Reveals How Allostery and @@ -45,6 +60,12 @@ Saccharomyces Cerevisiae.” Metabolic Engineering 75: 12–18. https://doi.org/10.1016/j.ymben.2022.11.003.
+
+
+Sluijs, Bob van, Tao Zhou, Britta Helwig, Mathieu Baltussen, Frank
+Nelissen, Hans Heus, and Wilhelm Huck. 2023. “Inverse Design of
+Enzymatic Reaction Network States.” https://doi.org/10.21203/rs.3.rs-2646906/v1.
+

2022

Date: Mon, 10 Jul 2023 12:21:36 +0200 Subject: [PATCH 14/32] Simplify / document antimony import (#2142) * Wrapper for importing antimony models, this will allow for more concise test cases in the future * Remove tellurium dependency, use libantimony directly (so we don't need ncurses and other deps) * Add antimony example --- .github/workflows/test_python_cplusplus.yml | 1 - .github/workflows/test_python_ver_matrix.yml | 3 +- documentation/python_interface.rst | 54 ++++++++++++++++- python/sdist/amici/antimony_import.py | 58 +++++++++++++++++++ python/sdist/setup.cfg | 2 +- python/tests/test_antimony_import.py | 42 ++++++++++++++ .../test_sbml_import_special_functions.py | 11 ++-- scripts/runNotebook.sh | 3 +- 8 files changed, 159 insertions(+), 15 deletions(-) create mode 100644 python/sdist/amici/antimony_import.py create mode 100644 python/tests/test_antimony_import.py diff --git a/.github/workflows/test_python_cplusplus.yml b/.github/workflows/test_python_cplusplus.yml index 9fecd9825a..8a4eaa3bb8 100644 --- a/.github/workflows/test_python_cplusplus.yml +++ b/.github/workflows/test_python_cplusplus.yml @@ -64,7 +64,6 @@ jobs: libboost-serialization-dev \ libboost-chrono-dev \ libhdf5-serial-dev \ - libncurses5 \ python3-venv \ swig \ lcov \ diff --git a/.github/workflows/test_python_ver_matrix.yml b/.github/workflows/test_python_ver_matrix.yml index e790ffdd79..59dcf91041 100644 --- a/.github/workflows/test_python_ver_matrix.yml +++ b/.github/workflows/test_python_ver_matrix.yml @@ -49,8 +49,7 @@ jobs: swig \ libatlas-base-dev \ libhdf5-serial-dev \ - libboost-math-dev \ - libncurses5 + libboost-math-dev # install AMICI - name: Build BNGL diff --git a/documentation/python_interface.rst b/documentation/python_interface.rst index 8ccc77e661..a919e925c4 100644 --- a/documentation/python_interface.rst +++ b/documentation/python_interface.rst @@ -133,9 +133,57 @@ Importing plain ODEs The AMICI Python interface does not currently support direct import of ODEs. However, it is straightforward to encode them as RateRules in an SBML model. -The `yaml2sbml `_ package may come in -handy, as it facilitates generating SBML models from a YAML-based specification -of an ODE model. Besides the SBML model it can also create +The most convenient options to do that are maybe +`Antimony `_ +and `yaml2sbml `_. + +An example using Antimony to specify the Lotka-Volterra equations is shown below: + +.. 
code-block:: python + + ant_model = """ + + model lotka_volterra + # see https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations + + # initial conditions + prey_density = 10; + predator_density = 10; + + # parameters + prey_growth_rate = 1.1; + predator_effect_on_prey = 0.4; + predator_death_rate = 0.4; + prey_effect_on_predator = 0.1; + + # dx/dt + prey_density' = prey_growth_rate * prey_density - predator_effect_on_prey * prey_density * predator_density; + predator_density' = prey_effect_on_predator * prey_density * predator_density - predator_death_rate * predator_density; + end + """ + module_name = "test_antimony_example_lv" + from amici.antimony_import import antimony2amici + antimony2amici( + ant_model, + model_name=module_name, + output_dir=module_name, + ) + model_module = amici.import_model_module( + module_name=module_name, module_path=outdir + ) + amici_model = model_module.getModel() + amici_model.setTimepoints(np.linspace(0, 100, 200)) + amici_solver = amici_model.getSolver() + rdata = amici.runAmiciSimulation(amici_model, amici_solver) + + from amici.plotting import plot_state_trajectories + plot_state_trajectories(rdata, model=amici_model) + + +The `yaml2sbml `_ package creates SBML models +from a YAML-based specification of an ODE model. Various examples are +`provided `_. +Besides the SBML model, yaml2sbml can also create `PEtab `_ files. SED-ML import diff --git a/python/sdist/amici/antimony_import.py b/python/sdist/amici/antimony_import.py new file mode 100644 index 0000000000..cc23079787 --- /dev/null +++ b/python/sdist/amici/antimony_import.py @@ -0,0 +1,58 @@ +"""Import of Antimony models into AMICI. + +https://antimony.sourceforge.net/ +https://tellurium.readthedocs.io/en/latest/antimony.html +""" +from pathlib import Path +from typing import Union + + +def antimony2sbml(ant_model: Union[str, Path]) -> str: + """Convert Antimony model to SBML. + + :param ant_model: Antimony model as string or path to file + + :returns: + The SBML model as string. + """ + import antimony as ant + + # Unload everything / free memory + ant.clearPreviousLoads() + ant.freeAll() + + try: + # potentially fails because of too long file name + is_file = Path(ant_model).exists() + except OSError: + is_file = False + + if is_file: + status = ant.loadAntimonyFile(ant_model) + else: + status = ant.loadAntimonyString(ant_model) + if status < 0: + raise RuntimeError("Antimony model could not be loaded.") + + if (main_module_name := ant.getMainModuleName()) is None: + raise AssertionError("There is no Antimony module.") + + sbml_str = ant.getSBMLString(main_module_name) + if not sbml_str: + raise ValueError("Antimony model could not be converted to SBML.") + + return sbml_str + + +def antimony2amici(ant_model: Union[str, Path], *args, **kwargs): + """Convert Antimony model to AMICI model. + + Converts the Antimony model provided as string of file to SBML and then imports it into AMICI. + + For documentation see :meth:`amici.sbml_import.SbmlImporter.sbml2amici`. 
+ """ + from .sbml_import import SbmlImporter + + sbml_str = antimony2sbml(ant_model) + sbml_importer = SbmlImporter(sbml_str, from_file=False) + return sbml_importer.sbml2amici(*args, **kwargs) diff --git a/python/sdist/setup.cfg b/python/sdist/setup.cfg index 706c407652..f419578754 100644 --- a/python/sdist/setup.cfg +++ b/python/sdist/setup.cfg @@ -52,7 +52,7 @@ test = pytest-rerunfailures coverage shyaml - tellurium + antimony vis = matplotlib seaborn diff --git a/python/tests/test_antimony_import.py b/python/tests/test_antimony_import.py new file mode 100644 index 0000000000..41af014aa9 --- /dev/null +++ b/python/tests/test_antimony_import.py @@ -0,0 +1,42 @@ +import amici +import numpy as np +from amici.antimony_import import antimony2amici +from amici.testing import TemporaryDirectoryWinSafe as TemporaryDirectory + + +def test_antimony_example(): + """If this example requires changes, please also update documentation/python_interface.rst.""" + ant_model = """ + model lotka_volterra + # see https://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equations + + # initial conditions + prey_density = 10; + predator_density = 10; + + # parameters + prey_growth_rate = 1.1; + predator_effect_on_prey = 0.4; + predator_death_rate = 0.4; + prey_effect_on_predator = 0.1; + + # dx/dt + prey_density' = prey_growth_rate * prey_density - predator_effect_on_prey * prey_density * predator_density; + predator_density' = prey_effect_on_predator * prey_density * predator_density - predator_death_rate * predator_density; + end + """ + module_name = "test_antimony_example_lv" + with TemporaryDirectory(prefix=module_name) as outdir: + antimony2amici( + ant_model, + model_name=module_name, + output_dir=outdir, + ) + model_module = amici.import_model_module( + module_name=module_name, module_path=outdir + ) + amici_model = model_module.getModel() + amici_model.setTimepoints(np.linspace(0, 100, 200)) + amici_solver = amici_model.getSolver() + rdata = amici.runAmiciSimulation(amici_model, amici_solver) + assert rdata.status == amici.AMICI_SUCCESS diff --git a/python/tests/test_sbml_import_special_functions.py b/python/tests/test_sbml_import_special_functions.py index bd8d071109..1aa806156d 100644 --- a/python/tests/test_sbml_import_special_functions.py +++ b/python/tests/test_sbml_import_special_functions.py @@ -9,6 +9,7 @@ import amici import numpy as np import pytest +from amici.antimony_import import antimony2amici from amici.gradient_check import check_derivatives from amici.testing import TemporaryDirectoryWinSafe, skip_on_valgrind from numpy.testing import assert_approx_equal, assert_array_almost_equal_nulp @@ -156,11 +157,9 @@ def negative_binomial_nllh(m: np.ndarray, y: np.ndarray, p: float): ) -@pytest.mark.filterwarnings("ignore:the imp module is deprecated:DeprecationWarning") +@skip_on_valgrind def test_rateof(): """Test chained rateOf to verify that model expressions are evaluated in the correct order.""" - import tellurium as te - ant_model = """ model test_chained_rateof species S1, S2, S3, S4; @@ -177,12 +176,10 @@ def test_rateof(): p3 = rateOf(rate); end """ - sbml_str = te.antimonyToSBML(ant_model) - sbml_importer = amici.SbmlImporter(sbml_str, from_file=False) - module_name = "test_chained_rateof" with TemporaryDirectoryWinSafe(prefix=module_name) as outdir: - sbml_importer.sbml2amici( + antimony2amici( + ant_model, model_name=module_name, output_dir=outdir, ) diff --git a/scripts/runNotebook.sh b/scripts/runNotebook.sh index c0f246d7bb..bf1ef8d5e0 100755 --- a/scripts/runNotebook.sh +++ 
b/scripts/runNotebook.sh @@ -26,7 +26,8 @@ if [ $# -eq 0 ]; then fi source ${AMICI_PATH}/build/venv/bin/activate -pip3 show nbconvert || (pip3 install --upgrade nbconvert && python3 -m ipykernel install --user --name amici --display-name "Python (amici)") +pip3 show nbconvert || pip3 install --upgrade nbconvert +pip3 show ipykernel || (pip3 install --upgrade ipykernel && python3 -m ipykernel install --user --name amici --display-name "Python (amici)") for arg in "$@"; do if [ -d $arg ]; then From 36a880ee8503962ea5861a8df7ef7e6ebb2e4ed9 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 13 Jul 2023 10:26:37 +0200 Subject: [PATCH 15/32] Fix SBML import for events with trigger functions depending on parameters that are initial assignment targets (#2145) I.e. given the current way of processing initial assignments, events have to be processed first. Otherwise model compilation fails with `use of undeclared identifier`, because the respective parameter was replaced by its initial assignment value. --- python/sdist/amici/sbml_import.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index 7a3f76f657..77d7bb4238 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -549,9 +549,9 @@ def _process_sbml( self._process_species() self._process_reactions() self._process_rules() + self._process_events() self._process_initial_assignments() self._process_species_references() - self._process_events() def check_support(self) -> None: """ From 3cccbb5563853b81e96e0aefc4db43a529dc7c51 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 20 Jul 2023 07:56:47 +0200 Subject: [PATCH 16/32] Add AMICI_TRY_ENABLE_HDF5 env var (#2148) Add AMICI_TRY_ENABLE_HDF5 environment variable to disable trying to build with HDF5 support if HDF5 is found. Related to #2144 --- CMakeLists.txt | 12 ++++++++++-- documentation/python_installation.rst | 4 ++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cdf0ff8fed..a763fbc301 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,8 +6,13 @@ cmake_minimum_required(VERSION 3.15) project(amici) # misc options +if(DEFINED ENV{AMICI_TRY_ENABLE_HDF5}) + option(AMICI_TRY_ENABLE_HDF5 "Build with HDF5 support if available?" + $ENV{AMICI_TRY_ENABLE_HDF5}) +else() + option(AMICI_TRY_ENABLE_HDF5 "Build with HDF5 support if available?" ON) +endif() option(AMICI_PYTHON_BUILD_EXT_ONLY "Build only the Python extension?" OFF) -option(AMICI_TRY_ENABLE_HDF5 "Build with HDF5 support if available?" ON) option(ENABLE_HDF5 "Build with HDF5 support?" OFF) option(SUNDIALS_SUPERLUMT_ENABLE "Enable sundials SuperLUMT?" OFF) option(EXPORT_PACKAGE "Export AMICI library to CMake package registry?" ON) @@ -324,7 +329,10 @@ if(HDF5_FOUND) target_link_libraries(${PROJECT_NAME} PUBLIC hdf5::hdf5_hl_cpp hdf5::hdf5_hl hdf5::hdf5_cpp hdf5::hdf5) else() - message(STATUS "HDF5 library NOT found. Building AMICI WITHOUT HDF5 support.") + message( + STATUS + "HDF5 support disabled or HDF5 library not found. Building AMICI WITHOUT HDF5 support." 
+ ) endif() if(AMICI_PYTHON_BUILD_EXT_ONLY) diff --git a/documentation/python_installation.rst b/documentation/python_installation.rst index 55e53342c6..5332520139 100644 --- a/documentation/python_installation.rst +++ b/documentation/python_installation.rst @@ -295,6 +295,10 @@ environment variables: | | processes to be used for C(++) | | | | compilation (defaults to 1) | | +----------------------------+----------------------------------+---------------------------------+ +| ``AMICI_TRY_ENABLE_HDF5`` | Whether to build AMICI with | ``AMICI_TRY_ENABLE_HDF5=OFF`` | +| | HDF5-support if possible. | | +| | Default: ``ON`` | | ++----------------------------+----------------------------------+---------------------------------+ Installation under Anaconda --------------------------- From bbf3295abecf088c879a78edef167f7c07315d9b Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Sun, 30 Jul 2023 14:24:51 +0200 Subject: [PATCH 17/32] Doc: update sbml2amici (#2139) --- python/sdist/amici/sbml_import.py | 33 ++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index 77d7bb4238..d37cc7b9c0 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -307,22 +307,27 @@ def sbml2amici( defined for a particular species. Sensitivity analysis for local parameters is enabled by creating - global parameters _{reactionId}_{localParameterName}. + global parameters ``_{reactionId}_{localParameterName}``. :param model_name: - name of the model/model directory + Name of the generated model package. + Note that in a given Python session, only one model with a given + name can be loaded at a time. + The generated Python extensions cannot be unloaded. Therefore, + make sure to choose a unique name for each model. :param output_dir: - see :meth:`amici.de_export.ODEExporter.set_paths` + Directory where the generated model package will be stored. :param observables: - dictionary( observableId:{'name':observableName - (optional), 'formula':formulaString)}) to be added to the model + Observables to be added to the model: + ``dictionary( observableId:{'name':observableName + (optional), 'formula':formulaString)})``. :param event_observables: - dictionary( eventObservableId:{'name':eventObservableName - (optional), 'event':eventId, 'formula':formulaString)}) to be - added to the model + Event observables to be added to the model: + ``dictionary( eventObservableId:{'name':eventObservableName + (optional), 'event':eventId, 'formula':formulaString)})`` :param constant_parameters: list of SBML Ids identifying constant parameters @@ -339,12 +344,16 @@ def sbml2amici( If nothing is passed for some observable id, a normal model is assumed as default. Either pass a noise type identifier, or a callable generating a custom noise string. + For noise identifiers, see + :func:`amici.import_utils.noise_distribution_to_cost_function`. :param event_noise_distributions: dictionary(eventObservableId: noise type). If nothing is passed for some observable id, a normal model is assumed as default. Either pass a noise type identifier, or a callable generating a custom noise string. + For noise identifiers, see + :func:`amici.import_utils.noise_distribution_to_cost_function`. 
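For illustration, a minimal sketch of how the observable- and noise-related arguments documented here combine — all IDs are hypothetical, and ``importer`` stands for an existing ``SbmlImporter`` instance whose model defines a species ``S1``:

```python
# a sketch only; "S1" is assumed to be a species ID in the SBML model
importer.sbml2amici(
    model_name="my_model",
    output_dir="my_model",
    observables={"obs_s1": {"name": "S1 signal", "formula": "S1"}},
    # any identifier understood by
    # amici.import_utils.noise_distribution_to_cost_function,
    # e.g. "normal", "log-normal", or "laplace"
    noise_distributions={"obs_s1": "log-normal"},
)
```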
:param verbose: verbosity level for logging, ``True``/``False`` default to @@ -392,14 +401,14 @@ def sbml2amici( :param log_as_log10: If ``True``, log in the SBML model will be parsed as ``log10`` (default), if ``False``, log will be parsed as natural logarithm - ``ln`` + ``ln``. :param generate_sensitivity_code: If ``False``, the code required for sensitivity computation will - not be generated + not be generated. :param hardcode_symbols: - List of SBML entitiy IDs that are to be hardcoded in the generated model. + List of SBML entity IDs that are to be hardcoded in the generated model. Their values cannot be changed anymore after model import. Currently only parameters that are not targets of rules or initial assignments are supported. @@ -1716,7 +1725,7 @@ def _process_log_likelihood( (False) or for event observables (True). :param event_reg: - indicates whether log-likelihoods definitons should be processed + indicates whether log-likelihood definitions should be processed for event observable regularization (Jrz). If this is activated, measurements are substituted by 0 and the observable by the respective regularization symbol. From 515a68a00f5e7c3eb7e62b69c4ae8410e72b9995 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Sun, 30 Jul 2023 14:27:30 +0200 Subject: [PATCH 18/32] Doc: Update Windows installation instructions (#2153) `/LIBPATH` doesn't seem to be compatible with ninja. See #2151. --- documentation/python_installation.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/documentation/python_installation.rst b/documentation/python_installation.rst index 5332520139..d075ac35c1 100644 --- a/documentation/python_installation.rst +++ b/documentation/python_installation.rst @@ -150,15 +150,15 @@ You will also need to define two environment variables: .. code-block:: text - BLAS_LIBS="/LIBPATH:C:/BLAS/OpenBLAS/lib openblas.lib" - BLAS_CFLAGS="/IC:/BLAS/OpenBLAS" + BLAS_LIBS="-LIBPATH:C:/BLAS/OpenBLAS/lib openblas.lib" + BLAS_CFLAGS="-IC:/BLAS/OpenBLAS" One way to do that is to run a PowerShell script with the following commands: .. code-block:: text - [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:/BLAS/OpenBLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::User) - [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:/BLAS/OpenBLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::Process) + [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "-LIBPATH:C:/BLAS/OpenBLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::User) + [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "-LIBPATH:C:/BLAS/OpenBLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::Process) [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:/BLAS/OpenBLAS/include/openblas", [System.EnvironmentVariableTarget]::User) [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:/BLAS/OpenBLAS/include/openblas", [System.EnvironmentVariableTarget]::Process) From 3cd3494e55cb5a74fb54182fc1ef0d21c1937751 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Sun, 30 Jul 2023 15:54:39 +0200 Subject: [PATCH 19/32] Cleanup includes in hdf5.cpp (#2154) Remove unused includes. Leftovers from the early beginnings, it seems ... 
In particular, don't use unistd.h for Windows-compatibility #2151 --- src/hdf5.cpp | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/hdf5.cpp b/src/hdf5.cpp index ffc7390053..e0cdde88cd 100644 --- a/src/hdf5.cpp +++ b/src/hdf5.cpp @@ -16,18 +16,6 @@ #include -#include -#include -#include -#ifdef AMI_HDF5_H_DEBUG -#ifndef __APPLE__ -#include -#else -#include -#endif -#endif -#include -#include namespace amici { namespace hdf5 { From fa69dd3ff9b522755b35332689327f671a030c04 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Wed, 16 Aug 2023 17:01:40 +0200 Subject: [PATCH 20/32] Fix SBML import for event-assigned parameters with non-float initial assignments (#2156) Currently it is incorrectly assumed that the initial value of an event-assigned parameter is (convertible to) a float. Therefore, SBML import fails if the initial assignment contains a symbolic expression that can't be floatified. Fixes #2149. --- python/sdist/amici/sbml_import.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index d37cc7b9c0..8c43d35cf2 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -1060,7 +1060,7 @@ def _process_parameters( for par in settings["var"]: self.symbols[partype][_get_identifier_symbol(par)] = { "name": par.getName() if par.isSetName() else par.getId(), - "value": par.getValue(), + "value": sp.Float(par.getValue()), } # Parameters that need to be turned into expressions @@ -1382,13 +1382,13 @@ def _convert_event_assignment_parameter_targets_to_species(self): ia_init = self._get_element_initial_assignment(par.getId()) parameter_def = { "name": par.getName() if par.isSetName() else par.getId(), - "value": par.getValue() if ia_init is None else ia_init, + "value": sp.Float(par.getValue()) if ia_init is None else ia_init, } # Fixed parameters are added as species such that they can be # targets of events. self.symbols[SymbolId.SPECIES][parameter_target] = { "name": parameter_def["name"], - "init": sp.Float(parameter_def["value"]), + "init": parameter_def["value"], # 'compartment': None, # can ignore for amounts "constant": False, "amount": True, From 6090e21b7be508add3dd66dc8f050375d17d3f13 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Sun, 20 Aug 2023 19:39:33 +0200 Subject: [PATCH 21/32] Set CMake policies for cmake 3.27 (#2162) * Use NEW version for any policy introduced up until 3.27. 
(Closes #2158) * Error on CMake dev warnings on GHA --- CMakeLists.txt | 1 + python/sdist/setup.py | 7 ++++++- src/CMakeLists.template.cmake | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a763fbc301..9d1b4e0e3f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,7 @@ # Build AMICI library # cmake_minimum_required(VERSION 3.15) +cmake_policy(VERSION 3.15...3.27) project(amici) diff --git a/python/sdist/setup.py b/python/sdist/setup.py index 30a72d6eb5..4d65634cc2 100755 --- a/python/sdist/setup.py +++ b/python/sdist/setup.py @@ -130,6 +130,9 @@ def get_extensions(): source_dir="amici", cmake_configure_options=[ *global_cmake_configure_options, + "-Werror=dev" + if "GITHUB_ACTIONS" in os.environ + else "-Wno-error=dev", "-DAMICI_PYTHON_BUILD_EXT_ONLY=ON", f"-DPython3_EXECUTABLE={Path(sys.executable).as_posix()}", ], @@ -142,7 +145,9 @@ def main(): # Readme as long package description to go on PyPi # (https://pypi.org/project/amici/) with open( - os.path.join(os.path.dirname(__file__), "README.md"), "r", encoding="utf-8" + os.path.join(os.path.dirname(__file__), "README.md"), + "r", + encoding="utf-8", ) as fh: long_description = fh.read() diff --git a/src/CMakeLists.template.cmake b/src/CMakeLists.template.cmake index 6c4f80630d..f015a78638 100644 --- a/src/CMakeLists.template.cmake +++ b/src/CMakeLists.template.cmake @@ -1,5 +1,6 @@ # Build AMICI model cmake_minimum_required(VERSION 3.15) +cmake_policy(VERSION 3.15...3.27) project(TPL_MODELNAME) From 44ba0548fdff841826bf2b19c8c4edd673e69a9b Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 21 Aug 2023 10:49:51 +0200 Subject: [PATCH 22/32] Remove complex / complex long KLU functions (#2160) We are unnecessarily building the complex / complex long versions of KLU. This PR removes them to speed up AMICI installation. We are still building both the int32_t and int64_t version of SuiteSparse, which leaves room for further optimization... 
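As a quick sanity check — a sketch only, with a hypothetical model package name. AMICI's sparse linear solver relies solely on KLU's real-valued routines, so an ordinary simulation is expected to behave exactly as before:

```python
import amici

# any previously generated and compiled model package (placeholder name)
model_module = amici.import_model_module("my_model", "my_model")
model = model_module.getModel()
solver = model.getSolver()
solver.setLinearSolver(amici.LinearSolver.KLU)  # KLU-backed sparse solver
rdata = amici.runAmiciSimulation(model, solver)
assert rdata.status == amici.AMICI_SUCCESS
```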
--- ThirdParty/SuiteSparse/KLU/Source/klu_z.c | 11 ----------- .../SuiteSparse/KLU/Source/klu_z_diagnostics.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_dump.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_extract.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_factor.c | 11 ----------- .../SuiteSparse/KLU/Source/klu_z_free_numeric.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_kernel.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_refactor.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_scale.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_solve.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_sort.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_z_tsolve.c | 11 ----------- ThirdParty/SuiteSparse/KLU/Source/klu_zl.c | 12 ------------ .../SuiteSparse/KLU/Source/klu_zl_diagnostics.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_dump.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_extract.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_factor.c | 12 ------------ .../SuiteSparse/KLU/Source/klu_zl_free_numeric.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_kernel.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_refactor.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_scale.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_solve.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_sort.c | 12 ------------ ThirdParty/SuiteSparse/KLU/Source/klu_zl_tsolve.c | 12 ------------ 24 files changed, 276 deletions(-) delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_diagnostics.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_dump.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_extract.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_factor.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_free_numeric.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_kernel.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_refactor.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_scale.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_solve.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_sort.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_z_tsolve.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_diagnostics.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_dump.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_extract.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_factor.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_free_numeric.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_kernel.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_refactor.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_scale.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_solve.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_sort.c delete mode 100644 ThirdParty/SuiteSparse/KLU/Source/klu_zl_tsolve.c diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z.c deleted file mode 100644 index 3be6ba6529..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z.c +++ /dev/null @@ -1,11 +0,0 @@ 
-//------------------------------------------------------------------------------ -// KLU/Source/klu_z.c: complex int32_t version of klu -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_diagnostics.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_diagnostics.c deleted file mode 100644 index 2204f05977..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_diagnostics.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_diagnostics.c: complex int32_t version of klu_diagnostics -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_diagnostics.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_dump.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_dump.c deleted file mode 100644 index 6579b965a3..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_dump.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_dump.c: complex int32_t version of klu_dump -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_dump.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_extract.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_extract.c deleted file mode 100644 index 5cf68c1b0a..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_extract.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_extract.c: complex int32_t version of klu_extract -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_extract.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_factor.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_factor.c deleted file mode 100644 index d5f345aa40..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_factor.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_factor.c: complex int32_t version of klu_factor -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. 
-// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_factor.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_free_numeric.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_free_numeric.c deleted file mode 100644 index e6ac863f1a..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_free_numeric.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_free_numeric.c: complex int32_t version of klu_free_numeric -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_free_numeric.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_kernel.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_kernel.c deleted file mode 100644 index 4b52d87649..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_kernel.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_kernel.c: complex int32_t version of klu_kernel -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_kernel.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_refactor.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_refactor.c deleted file mode 100644 index 0dff84e37e..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_refactor.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_refactor.c: complex int32_t version of klu_refactor -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_refactor.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_scale.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_scale.c deleted file mode 100644 index 47ee996790..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_scale.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_scale.c: complex int32_t version of klu_scale -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_scale.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_solve.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_solve.c deleted file mode 100644 index 362a77ecb1..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_solve.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_solve.c: complex int32_t version of klu_solve -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. 
Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_solve.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_sort.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_sort.c deleted file mode 100644 index 3f0ea62a32..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_sort.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_sort.c: complex int32_t version of klu_sort -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_sort.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_z_tsolve.c b/ThirdParty/SuiteSparse/KLU/Source/klu_z_tsolve.c deleted file mode 100644 index d82a1038f1..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_z_tsolve.c +++ /dev/null @@ -1,11 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_z_tsolve.c: complex int32_t version of klu_tsolve -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#include "klu_tsolve.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl.c deleted file mode 100644 index 23dbf2e9b9..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl.c: complex int64_t version of klu -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_diagnostics.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_diagnostics.c deleted file mode 100644 index 28e8979b1e..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_diagnostics.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_diagnostics.c: complex int64_t version of klu_diagnostics -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_diagnostics.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_dump.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_dump.c deleted file mode 100644 index 4349cd1ebc..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_dump.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_dump.c: complex int64_t version of klu_dump -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. 
Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_dump.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_extract.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_extract.c deleted file mode 100644 index 9d2725d70f..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_extract.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_extract.c: complex int64_t version of klu_extract -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_extract.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_factor.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_factor.c deleted file mode 100644 index b259e62ea9..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_factor.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_factor.c: complex int64_t version of klu_factor -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_factor.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_free_numeric.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_free_numeric.c deleted file mode 100644 index 5ab980b57e..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_free_numeric.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_free_numeric.c: complex int64_t version of klu_free_numeric -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_free_numeric.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_kernel.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_kernel.c deleted file mode 100644 index 5ab9c5af1e..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_kernel.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_kernel.c: complex int64_t version of klu_kernel -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. 
-// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_kernel.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_refactor.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_refactor.c deleted file mode 100644 index d6216515ba..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_refactor.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_refactor.c: complex int64_t version of klu_refactor -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_refactor.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_scale.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_scale.c deleted file mode 100644 index 9a2c697696..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_scale.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_scale.c: complex int64_t version of klu_scale -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_scale.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_solve.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_solve.c deleted file mode 100644 index 20396f6197..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_solve.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_solve.c: complex int64_t version of klu_solve -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_solve.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_sort.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_sort.c deleted file mode 100644 index 5cfcb8c8be..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_sort.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_sort.c: complex int64_t version of klu_sort -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. 
-// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_sort.c" - diff --git a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_tsolve.c b/ThirdParty/SuiteSparse/KLU/Source/klu_zl_tsolve.c deleted file mode 100644 index 3a6884c9ba..0000000000 --- a/ThirdParty/SuiteSparse/KLU/Source/klu_zl_tsolve.c +++ /dev/null @@ -1,12 +0,0 @@ -//------------------------------------------------------------------------------ -// KLU/Source/klu_zl_tsolve.c: complex int64_t version of klu_tsolve -//------------------------------------------------------------------------------ - -// KLU, Copyright (C) 2004-2022, University of Florida, All Rights Reserved. -// Authors: Timothy A. Davis and Ekanathan Palamadai. -// SPDX-License-Identifier: LGPL-2.1+ - -#define COMPLEX -#define DLONG -#include "klu_tsolve.c" - From 260b4f62d9294e4208a8bc1ba68c7ba02ac0599b Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 24 Aug 2023 13:21:37 +0200 Subject: [PATCH 23/32] Fix lib/ vs lib64/ issue with SUNDIALS config (#2165) Previously, the sundials cmake configuration wasn't found on systems that use lib64/ instead of lib/ Closes #2143 --- CMakeLists.txt | 2 +- cmake/AmiciConfig.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9d1b4e0e3f..966e844142 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -126,7 +126,7 @@ set(VENDORED_SUNDIALS_BUILD_DIR ${VENDORED_SUNDIALS_DIR}/build) set(VENDORED_SUNDIALS_INSTALL_DIR ${VENDORED_SUNDIALS_BUILD_DIR}) set(SUNDIALS_PRIVATE_INCLUDE_DIRS "${VENDORED_SUNDIALS_DIR}/src") find_package(SUNDIALS REQUIRED PATHS - "${VENDORED_SUNDIALS_INSTALL_DIR}/lib/cmake/sundials/") + "${VENDORED_SUNDIALS_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/cmake/sundials/") message(STATUS "Found SUNDIALS: ${SUNDIALS_DIR}") set(GSL_LITE_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/ThirdParty/gsl") diff --git a/cmake/AmiciConfig.cmake b/cmake/AmiciConfig.cmake index 7ac565f3c6..cb7d57d51a 100644 --- a/cmake/AmiciConfig.cmake +++ b/cmake/AmiciConfig.cmake @@ -42,7 +42,7 @@ set_target_properties(SUNDIALS::KLU PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${KLU_INCLUDE_DIR}") find_package(SUNDIALS REQUIRED PATHS - "@CMAKE_SOURCE_DIR@/ThirdParty/sundials/build/lib/cmake/sundials/") + "@CMAKE_SOURCE_DIR@/ThirdParty/sundials/build/@CMAKE_INSTALL_LIBDIR@/cmake/sundials/") if(@Boost_CHRONO_FOUND@) find_package(Boost COMPONENTS chrono REQUIRED) From 16aad6fe5cc4f0e71e96f3f6b87fae9d6afdba62 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 24 Aug 2023 14:19:35 +0200 Subject: [PATCH 24/32] Update reference list (#2147) Closes #2040 --- documentation/amici_refs.bib | 32 ++++++++++++++++++++++++++++++++ documentation/references.md | 18 +++++++++++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/documentation/amici_refs.bib b/documentation/amici_refs.bib index 34c2cf1ba1..8730883075 100644 --- a/documentation/amici_refs.bib +++ b/documentation/amici_refs.bib @@ -1185,6 +1185,38 @@ @Misc{SluijsZho2023 modificationdate = {2023-07-06T10:40:37}, } +@Article{BuckBas2023, + author = {Michèle C. Buck and Lisa Bast and Judith S. Hecker and Jennifer Rivière and Maja Rothenberg-Thurley and Luisa Vogel and Dantong Wang and Immanuel Andrä and Fabian J. Theis and Florian Bassermann and Klaus H. Metzeler and Robert A.J. Oostendorp and Carsten Marr and Katharina S. 
Götze}, + journal = {iScience}, + title = {Progressive Disruption of Hematopoietic Architecture from Clonal Hematopoiesis to {MDS}}, + year = {2023}, + issn = {2589-0042}, + pages = {107328}, + abstract = {Summary +Clonal hematopoiesis of indeterminate potential (CHIP) describes the age-related acquisition of somatic mutations in hematopoietic stem/progenitor cells (HSPC) leading to clonal blood cell expansion. Although CHIP mutations drive myeloid malignancies like myelodysplastic syndromes (MDS) it is unknown if clonal expansion is attributable to changes in cell type kinetics, or involves reorganization of the hematopoietic hierarchy. Using computational modeling we analyzed differentiation and proliferation kinetics of cultured hematopoietic stem cells (HSC) from 8 healthy individuals, 7 CHIP, and 10 MDS patients. While the standard hematopoietic hierarchy explained HSPC kinetics in healthy samples, 57% of CHIP and 70% of MDS samples were best described with alternative hierarchies. Deregulated kinetics were found at various HSPC compartments with high inter-individual heterogeneity in CHIP and MDS, while altered HSC rates were most relevant in MDS. Quantifying kinetic heterogeneity in detail, we show that reorganization of the HSPC compartment is detectable in the premalignant CHIP state.}, + creationdate = {2023-07-17T09:12:31}, + doi = {10.1016/j.isci.2023.107328}, + keywords = {clonal hematopoiesis, myelodysplastic syndrome, computational modeling, hematopoietic hierarchy}, + modificationdate = {2023-07-17T09:12:50}, + url = {https://www.sciencedirect.com/science/article/pii/S2589004223014050}, +} + +@Article{TunedalVio2023, + author = {Tunedal, Kajsa and Viola, Federica and Garcia, Belén Casas and Bolger, Ann and Nyström, Fredrik H. and Östgren, Carl Johan and Engvall, Jan and Lundberg, Peter and Dyverfeldt, Petter and Carlhäll, Carl-Johan and Cedersund, Gunnar and Ebbers, Tino}, + journal = {The Journal of Physiology}, + title = {Haemodynamic effects of hypertension and type 2 diabetes: Insights from a {4D} flow {MRI-based} personalized cardiovascular mathematical model}, + year = {2023}, + number = {n/a}, + volume = {n/a}, + abstract = {Abstract Type 2 diabetes (T2D) and hypertension increase the risk of cardiovascular diseases mediated by whole-body changes to metabolism, cardiovascular structure and haemodynamics. The haemodynamic changes related to hypertension and T2D are complex and subject-specific, however, and not fully understood. We aimed to investigate the haemodynamic mechanisms in T2D and hypertension by comparing the haemodynamics between healthy controls and subjects with T2D, hypertension, or both. For all subjects, we combined 4D flow magnetic resonance imaging data, brachial blood pressure and a cardiovascular mathematical model to create a comprehensive subject-specific analysis of central haemodynamics. When comparing the subject-specific haemodynamic parameters between the four groups, the predominant haemodynamic difference is impaired left ventricular relaxation in subjects with both T2D and hypertension compared to subjects with only T2D, only hypertension and controls. The impaired relaxation indicates that, in this cohort, the long-term changes in haemodynamic load of co-existing T2D and hypertension cause diastolic dysfunction demonstrable at rest, whereas either disease on its own does not. 
However, through subject-specific predictions of impaired relaxation, we show that altered relaxation alone is not enough to explain the subject-specific and group-related differences; instead, a combination of parameters is affected in T2D and hypertension. These results confirm previous studies that reported more adverse effects from the combination of T2D and hypertension compared to either disease on its own. Furthermore, this shows the potential of personalized cardiovascular models in providing haemodynamic mechanistic insights and subject-specific predictions that could aid in the understanding and treatment planning of patients with T2D and hypertension. Key points The combination of 4D flow magnetic resonance imaging data and a cardiovascular mathematical model allows for a comprehensive analysis of subject-specific haemodynamic parameters that otherwise cannot be derived non-invasively. Using this combination, we show that diastolic dysfunction in subjects with both type 2 diabetes (T2D) and hypertension is the main group-level difference between controls, subjects with T2D, subjects with hypertension, and subjects with both T2D and hypertension. These results suggest that, in this relatively healthy population, the additional load of both hypertension and T2D affects the haemodynamic function of the left ventricle, whereas each disease on its own is not enough to cause significant effects under resting conditions. Finally, using the subject-specific model, we show that the haemodynamic effects of diastolic dysfunction alone are not sufficient to explain all the observed haemodynamic differences. Instead, additional subject-specific variations in cardiac and vascular function combine to explain the complex haemodynamics of subjects affected by hypertension and/or T2D.}, + creationdate = {2023-07-27T08:14:11}, + doi = {10.1113/JP284652}, + eprint = {https://physoc.onlinelibrary.wiley.com/doi/pdf/10.1113/JP284652}, + keywords = {cardiovascular modelling, clinical data, computer modelling, diabetes, hemodynamic, hypertension, mathematical model}, + modificationdate = {2023-07-27T08:19:05}, + url = {https://physoc.onlinelibrary.wiley.com/doi/abs/10.1113/JP284652}, +} + @Comment{jabref-meta: databaseType:bibtex;} @Comment{jabref-meta: grouping: diff --git a/documentation/references.md b/documentation/references.md index d0fec0f88b..3f5833e26f 100644 --- a/documentation/references.md +++ b/documentation/references.md @@ -1,6 +1,6 @@ # References -List of publications using AMICI. Total number is 77. +List of publications using AMICI. Total number is 79. If you applied AMICI in your work and your publication is missing, please let us know via a new GitHub issue. @@ -14,6 +14,13 @@ If you applied AMICI in your work and your publication is missing, please let us

2023

+
+Buck, Michèle C., Lisa Bast, Judith S. Hecker, Jennifer Rivière, Maja +Rothenberg-Thurley, Luisa Vogel, Dantong Wang, et al. 2023. +“Progressive Disruption of Hematopoietic Architecture from Clonal +Hematopoiesis to MDS.” iScience, 107328. https://doi.org/10.1016/j.isci.2023.107328. +
Contento, Lorenzo, Noemi Castelletti, Elba Raimúndez, Ronan Le Gleut, Yannik Schälte, Paul Stapor, Ludwig Christian Hinske, et al. 2023. @@ -66,6 +73,15 @@ Nelissen, Hans Heus, and Wilhelm Huck. 2023. “Inverse Design of Enzymatic Reaction Network States.” https://doi.org/10.21203/rs.3.rs-2646906/v1.
+
+Tunedal, Kajsa, Federica Viola, Belén Casas Garcia, Ann Bolger, Fredrik +H. Nyström, Carl Johan Östgren, Jan Engvall, et al. 2023. +“Haemodynamic Effects of Hypertension and Type 2 Diabetes: +Insights from a 4d Flow MRI-based Personalized Cardiovascular Mathematical +Model.” The Journal of Physiology n/a (n/a). https://doi.org/10.1113/JP284652. +

2022

Date: Fri, 25 Aug 2023 09:42:26 +0200
Subject: [PATCH 25/32] CMake: fix scope for -DHAS_BOOST_CHRONO (#2163)

* CMake: fix scope for -DHAS_BOOST_CHRONO (private -> public)

  Fixes the issue of potentially mixing boost and non-boost versions of
  CpuTimer.

* CpuTimer unit test
* simplify CpuTimer
* document issues with compile definitions from dependencies
* Add `amici.CpuTimer.uses_thread_clock` to check at runtime whether we are
  using `thread_clock`
---
 CMakeLists.txt                   |  2 +-
 include/amici/misc.h             | 18 +++++++++---------
 swig/CMakeLists.txt              |  4 ++++
 tests/cpp/unittests/testMisc.cpp | 12 ++++++++++++
 4 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 966e844142..a31104b47e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -296,7 +296,7 @@ if("$ENV{ENABLE_AMICI_DEBUGGING}"
 endif()
 
 target_compile_definitions(
-  ${PROJECT_NAME} PRIVATE $<$<BOOL:${Boost_CHRONO_FOUND}>:HAS_BOOST_CHRONO>)
+  ${PROJECT_NAME} PUBLIC $<$<BOOL:${Boost_CHRONO_FOUND}>:HAS_BOOST_CHRONO>)
 
 target_link_libraries(
   ${PROJECT_NAME}
diff --git a/include/amici/misc.h b/include/amici/misc.h
index 32dde8edcd..c18606e3e9 100644
--- a/include/amici/misc.h
+++ b/include/amici/misc.h
@@ -255,10 +255,10 @@ template <class T> bool is_equal(T const& a, T const& b) {
 }
 
 #ifdef BOOST_CHRONO_HAS_THREAD_CLOCK
-/** Tracks elapsed CPU time. */
+/** Tracks elapsed CPU time using boost::chrono::thread_clock. */
 class CpuTimer {
     using clock = boost::chrono::thread_clock;
-    using time_point = boost::chrono::thread_clock::time_point;
+    using time_point = clock::time_point;
     using d_seconds = boost::chrono::duration<double>;
     using d_milliseconds = boost::chrono::duration<double, boost::milli>;
 
@@ -279,8 +279,7 @@ class CpuTimer {
      * @return CPU time in seconds
      */
     double elapsed_seconds() const {
-        return boost::chrono::duration_cast<d_seconds>(clock::now() - start_)
-            .count();
+        return d_seconds(clock::now() - start_).count();
     }
 
     /**
@@ -289,18 +288,17 @@ class CpuTimer {
      * @return CPU time in milliseconds
     */
    double elapsed_milliseconds() const {
-        return boost::chrono::duration_cast<d_milliseconds>(
-                   clock::now() - start_
-               )
-            .count();
+        return d_milliseconds(clock::now() - start_).count();
    }
 
+    static const bool uses_thread_clock = true;
+
  private:
    /** Start time */
    time_point start_;
 };
 #else
-/** Tracks elapsed CPU time. */
+/** Tracks elapsed CPU time using std::clock. */
 class CpuTimer {
  public:
    /**
@@ -332,6 +330,8 @@ class CpuTimer {
               / CLOCKS_PER_SEC;
    }
 
+    static const bool uses_thread_clock = false;
+
  private:
    /** Start time */
    std::clock_t start_;
diff --git a/swig/CMakeLists.txt b/swig/CMakeLists.txt
index 7c8e2145d1..87d607d33a 100644
--- a/swig/CMakeLists.txt
+++ b/swig/CMakeLists.txt
@@ -112,6 +112,10 @@ if(${SWIG_VERSION} VERSION_LESS 4.1.0)
                PROPERTY SWIG_COMPILE_OPTIONS -py3)
 endif()
 
+# NOTE: No public definitions of any dependency are forwarded to SWIG;
+# they are only used for compiling the SWIG-generated source file.
+# Any definitions that are relevant for SWIG code generation need to be
+# forwarded manually.
 target_link_libraries(_amici amici Python3::Python)
 
 if(WIN32)
   add_custom_command(
diff --git a/tests/cpp/unittests/testMisc.cpp b/tests/cpp/unittests/testMisc.cpp
index aa3ae226c9..80d2c3bc36 100644
--- a/tests/cpp/unittests/testMisc.cpp
+++ b/tests/cpp/unittests/testMisc.cpp
@@ -723,4 +723,16 @@ TEST(SpanEqual, SpanEqual)
     EXPECT_FALSE(is_equal(a, b));
 }
 
+TEST(CpuTimer, CpuTimer)
+{
+    amici::CpuTimer timer;
+    auto elapsed = timer.elapsed_seconds();
+    EXPECT_LE(0.0, elapsed);
+    EXPECT_GT(1.0, elapsed);
+
+    elapsed = timer.elapsed_milliseconds();
+    EXPECT_LT(0.0, elapsed);
+    EXPECT_GT(1000.0, elapsed);
+}
+
 } // namespace
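A minimal runtime check of the flag introduced above, assuming an installed AMICI Python package built from this revision; only `amici.CpuTimer.uses_thread_clock` is confirmed by the commit message, so the availability of the wrapped constructor and `elapsed_*` methods from Python is an assumption here:

```python
import amici

# True if the extension was compiled against boost::chrono::thread_clock,
# False if the std::clock fallback from misc.h is in use.
print("uses thread_clock:", amici.CpuTimer.uses_thread_clock)

# Timing via the (assumed) SWIG-wrapped timer; mirrors the C++ API above.
timer = amici.CpuTimer()
sum(i * i for i in range(100_000))  # stand-in CPU workload
print("elapsed CPU time [ms]:", timer.elapsed_milliseconds())
```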
From 98c782dc27d7729436a3dc51bf3e5705325a4e8f Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Fri, 25 Aug 2023 09:44:33 +0200
Subject: [PATCH 26/32] Combine code for sparse model functions and their
 index files (#2159)

Combine code for sparse model functions and their index files, i.e. generate
only a single file instead of 3 individual files for content, rowvals, and
colptrs, respectively.

Advantage: Faster import of smaller models and fewer files. For a toy model,
this reduced the build steps from 44 to 28, and reduced build time by >20%
on my computer.

Disadvantage: None found, so I don't think it is worth adding an option for
(not) combining those files. For larger models, there shouldn't be any
impact. The extra time for compiling the index arrays should be negligible
compared to computing the contents.

Related to #2119

Here is a test for a large model (N=1):

| File         | Size    | Compilation time (s) |
|--------------|--------:|---------------------:|
| dwdx         | 22.4MiB |              3413.64 |
| dwdx_colptrs | 2.0KiB  |                 2.79 |
| dwdx_rowvals | 65.6KiB |                 2.66 |
| *combined*   |         |              3416.79 |

I'd consider this time increase negligible.
---
 python/sdist/amici/de_export.py | 48 ++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 21 deletions(-)

diff --git a/python/sdist/amici/de_export.py b/python/sdist/amici/de_export.py
index 96e25861ab..1b46b5be84 100644
--- a/python/sdist/amici/de_export.py
+++ b/python/sdist/amici/de_export.py
@@ -32,6 +32,7 @@
     Set,
     Tuple,
     Union,
+    Literal
 )
 
 import numpy as np
@@ -2838,9 +2839,6 @@ def _generate_c_code(self) -> None:
             if func_info.generate_body:
                 dec = log_execution_time(f"writing {func_name}.cpp", logger)
                 dec(self._write_function_file)(func_name)
-            if func_name in sparse_functions and func_info.body:
-                self._write_function_index(func_name, "colptrs")
-                self._write_function_index(func_name, "rowvals")
 
         for name in self.model.sym_names():
             # only generate for those that have nontrivial implementation,
@@ -3040,8 +3038,24 @@ def _write_function_file(self, function: str) -> None:
         else:
             equations = self.model.eq(function)
 
+        # function body
+        if function == "create_splines":
+            body = self._get_create_splines_body()
+        else:
+            body = self._get_function_body(function, equations)
+        if not body:
+            return
+
+        # colptrs / rowvals for sparse matrices
+        if function in sparse_functions:
+            lines = self._generate_function_index(function, "colptrs")
+            lines.extend(self._generate_function_index(function, "rowvals"))
+            lines.append("\n\n")
+        else:
+            lines = []
+
         # function header
-        lines = [
+        lines.extend([
             '#include "amici/symbolic_functions.h"',
             '#include "amici/defines.h"',
             '#include "sundials/sundials_types.h"',
             "",
             "#include ",
             "#include ",
             "",
-        ]
+        ])
 
         if function == "create_splines":
             lines += ['#include "amici/splinefunctions.h"', "#include "]
@@ -3096,14 +3110,6 @@ def _write_function_file(self, function: str) -> None:
             ]
         )
 
-        # function body
-        if function == "create_splines":
-            body = self._get_create_splines_body()
-        else:
-            body = self._get_function_body(function, equations)
-        if not body:
-            return
-
         if self.assume_pow_positivity and func_info.assume_pow_positivity:
             pow_rx = re.compile(r"(^|\W)std::pow\(")
             body = [
@@ -3137,16 +3143,20 @@ def _write_function_file(self, function: str) -> None:
         with open(filename, "w") as fileout:
             fileout.write("\n".join(lines))
 
-    def _write_function_index(self, function: str, indextype: str) -> None:
+    def _generate_function_index(
+        self, function: str, indextype: Literal["colptrs", "rowvals"]
+    ) -> List[str]:
         """
-        Generate equations and write the C++ code for the function
-        ``function``.
+        Generate equations and C++ code for the function ``function``.
 
         :param function:
             name of the function to be written (see ``self.functions``)
 
         :param indextype:
             type of index {'colptrs', 'rowvals'}
+
+        :returns:
+            The code lines for the respective function index file
         """
         if indextype == "colptrs":
             values = self.model.colptrs(function)
@@ -3233,11 +3243,7 @@ def _write_function_index(self, function: str, indextype: str) -> None:
             ]
         )
 
-        filename = f"{function}_{indextype}.cpp"
-        filename = os.path.join(self.model_path, filename)
-
-        with open(filename, "w") as fileout:
-            fileout.write("\n".join(lines))
+        return lines
 
     def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]:
         """
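The `colptrs` / `rowvals` arrays emitted by the index functions above follow the standard compressed sparse column (CSC) layout. A minimal sketch of that correspondence; `scipy` is used here purely for illustration and is not part of the generated code:

```python
import numpy as np
from scipy.sparse import csc_matrix

# toy 3x2 matrix with three nonzeros
dense = np.array([[0.0, 2.0], [1.0, 0.0], [0.0, 3.0]])
mat = csc_matrix(dense)

print(mat.indptr)   # column pointers -> the generated "colptrs" array: [0 1 3]
print(mat.indices)  # row indices     -> the generated "rowvals" array: [1 0 2]
print(mat.data)     # nonzero values  -> computed by the function body itself
```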
From 8c80329fc0c905b11f120ace51bc73dad48ed2da Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Fri, 25 Aug 2023 15:43:53 +0200
Subject: [PATCH 27/32] CI: Update sonar-scanner (#2167)

* CI: Update sonar-scanner

* Fix: `The property 'sonar.login' is deprecated and will be removed in the
  future. Please use the 'sonar.token' property instead when passing a token.`

* Remove sonar cache config and GHA cache action

  By now, cache is stored on the server by default, see
  https://docs.sonarcloud.io/advanced-setup/languages/c-c-objective-c/#analysis-cache

* Sonar token to secrets
---
 .github/workflows/test_python_cplusplus.yml | 10 ++--------
 sonar-project.properties                    |  3 ---
 2 files changed, 2 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/test_python_cplusplus.yml b/.github/workflows/test_python_cplusplus.yml
index 8a4eaa3bb8..416252ee4b 100644
--- a/.github/workflows/test_python_cplusplus.yml
+++ b/.github/workflows/test_python_cplusplus.yml
@@ -36,7 +36,7 @@ jobs:
      - run: echo "BNGPATH=${GITHUB_WORKSPACE}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV
 
      # sonar cloud
-      - run: echo "SONAR_SCANNER_VERSION=4.7.0.2747" >> $GITHUB_ENV
+      - run: echo "SONAR_SCANNER_VERSION=5.0.1.3006" >> $GITHUB_ENV
      - run: echo "SONAR_SCANNER_HOME=${HOME}/.sonar/sonar-scanner-$SONAR_SCANNER_VERSION-linux" >> $GITHUB_ENV
      - run: echo "SONAR_SCANNER_OPTS=-server" >> $GITHUB_ENV
      - run: echo "${SONAR_SCANNER_HOME}/bin" >> $GITHUB_PATH
@@ -77,13 +77,6 @@ jobs:
        run: |
          CI_SONARCLOUD=TRUE scripts/buildAmici.sh
 
-      - name: Cache sonar files
-        id: cache-sonar
-        uses: actions/cache@v3
-        with:
-          path: sonar_cache
-          key: ${{ runner.os }}-sonar_cache
-
      - name: C++ tests
        run: |
          scripts/run-cpp-tests.sh
@@ -157,6 +150,7 @@ jobs:
      - name: Run sonar-scanner
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
        run: |
          sonar-scanner \
            -Dsonar.cfamily.build-wrapper-output=bw-output \
diff --git a/sonar-project.properties b/sonar-project.properties
index 6b824acb66..4ded528912 100644
--- a/sonar-project.properties
+++ b/sonar-project.properties
@@ -4,7 +4,6 @@
 # https://sonarcloud.io/documentation/analysis/languages/cfamily/
 
 sonar.host.url=https://sonarcloud.io
-sonar.login=af35cb17710485d21c8e453a77f1f008eae1f7a4
 sonar.organization=icb-dcm
 sonar.projectKey=ICB-DCM_AMICI
 
@@ -27,8 +26,6 @@ sonar.sourceEncoding=UTF-8
 sonar.cfamily.threads=2
 sonar.cfamily.gcov.reportsPath=build
 
-sonar.cfamily.cache.enabled=true
-sonar.cfamily.cache.path=sonar_cache
 sonar.cpp.std=c++17
 
 sonar.python.coverage.reportPaths=build/coverage_py.xml

From 0bf62161c2c3c75db544dd46e311480dbafd116f Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Thu, 24 Aug 2023 07:48:49 +0200
Subject: [PATCH 28/32] Release notes / v0.19.0

---
 CHANGELOG.md | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
 version.txt  |  2 +-
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0ab17965d0..aa46196e0e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,55 @@
 
 ## v0.X Series
 
+### v0.19.0 (2023-08-26)
+
+**Features**
+* SBML import now supports `rateOf`
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2120
+* Added `Model.{get,set}SteadyStateComputationMode` (analogous to
+  `SteadyStateSensitivityMode`), which allows choosing how the steady state
+  is computed.
+  by @plakrisenko in https://github.com/AMICI-dev/AMICI/pull/2074
+
+  **Note: The default `SteadyStateSensitivityMode` changed from `newtonOnly`
+  to `integrateIfNewtonFails`.**
+
+* SBML import: Allow hardcoding of numerical values
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2134
+* Added `antimony2amici` for more convenient Antimony import
+  (simplifies working with raw ODEs, see documentation)
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2142
+* Added `AMICI_TRY_ENABLE_HDF5` environment variable to control whether to
+  search for HDF5 or not
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2148
+
+**Fixes**
+
+* Fixed SBML import for events with trigger functions depending on
+  parameters that are initial assignment targets
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2145
+* Fixed SBML import for event-assigned parameters with non-float initial
+  assignments
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2156
+* Fixed `unistd.h` dependency of `hdf5.cpp` that led to compilation
+  failures on Windows
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2154
+* Set CMake policies for cmake 3.27 by @dweindl in
+  https://github.com/AMICI-dev/AMICI/pull/2162
+* Fixed a `lib/` vs `lib64/` issue, leading to
+  `SUNDIALSConfig.cmake`-not-found issues on some systems
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2165
+* CMake: fixed scope of `-DHAS_BOOST_CHRONO`, which may have led to a mix of
+  `boost::chrono::thread_clock` and `std::clock` being used in programs
+  using AMICI, and potentially to segmentation faults
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2163
+
+**Performance**
+* Combined code for sparse model functions and their index files for
+  slightly faster compilation of small models
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2159
+* Removed complex / complex long KLU functions for slightly faster AMICI
+  package installation
+  by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2160
+
+**Full Changelog**: https://github.com/AMICI-dev/AMICI/compare/v0.18.1...v0.19.0
+
+
 ### v0.18.1 (2023-06-26)
 
 Fixes:
diff --git a/version.txt b/version.txt
index 249afd517d..1cf0537c34 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.18.1
+0.19.0
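A usage sketch for the steady-state settings mentioned in the release notes above; `model_name` and `model_dir` are hypothetical placeholders for an already imported model, and `SteadyStateComputationMode` is assumed to mirror `SteadyStateSensitivityMode`, as the changelog states:

```python
import amici

# hypothetical, previously imported model
model_module = amici.import_model_module("model_name", "model_dir")
model = model_module.getModel()

# how the steady state itself is computed (new in v0.19.0) ...
model.setSteadyStateComputationMode(
    amici.SteadyStateComputationMode.integrateIfNewtonFails
)
# ... and how steady-state sensitivities are computed; note that the
# default changed from newtonOnly to integrateIfNewtonFails
model.setSteadyStateSensitivityMode(
    amici.SteadyStateSensitivityMode.integrateIfNewtonFails
)
```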
From 3c5e997df3655c26dde35705ef25b2a0f419fe8b Mon Sep 17 00:00:00 2001
From: Stephan Grein
Date: Tue, 19 Sep 2023 21:48:34 +0200
Subject: [PATCH 29/32] Doc: Add installation instructions for Arch Linux
 (#2173)

Installation instructions for Arch Linux
---
 documentation/python_installation.rst | 37 +++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/documentation/python_installation.rst b/documentation/python_installation.rst
index d075ac35c1..a0fcf0908b 100644
--- a/documentation/python_installation.rst
+++ b/documentation/python_installation.rst
@@ -66,6 +66,43 @@ Install AMICI:
 
     pip3 install amici
 
+Arch Linux
+----------
+
+Install the AMICI dependencies via ``pacman``
+(this requires superuser privileges):
+
+.. code-block:: bash
+
+   sudo pacman -S python swig openblas gcc hdf5 boost-libs
+
+Install AMICI:
+
+.. code-block:: bash
+
+   pip3 install amici
+
+Alternatively:
+
+1. Check whether the packages are already installed in the required versions:
+
+.. code-block:: bash
+
+   sudo pacman -Si python swig openblas gcc hdf5 boost-libs
+
+2. Upgrade the installed packages if the required minimum versions are not satisfied:
+
+.. code-block:: bash
+
+   sudo pacman -Su python swig openblas gcc hdf5 boost-libs
+
+3. Install AMICI:
+
+.. code-block:: bash
+
+   pip3 install amici
+
 Installation on OSX
 +++++++++++++++++++
 

From 890b9937a99f0bf17fb0f7ae62257a3b34b97ed9 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Thu, 31 Aug 2023 10:20:57 +0200
Subject: [PATCH 30/32] Fix unused pysb2amici / sbml2amici / DEExporter
 `compiler` argument (#2168)

This was ignored since switching to CMake-based builds. Now it works again.

The value of `compiler` is forwarded to the `CXX` environment variable when
CMake is invoked.

Fixes #2140
---
 python/sdist/amici/de_export.py   | 16 ++++++++++------
 python/sdist/amici/pysb_import.py |  4 ++--
 python/sdist/amici/sbml_import.py |  4 ++--
 3 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/python/sdist/amici/de_export.py b/python/sdist/amici/de_export.py
index 1b46b5be84..de1b96a1c6 100644
--- a/python/sdist/amici/de_export.py
+++ b/python/sdist/amici/de_export.py
@@ -2692,7 +2692,8 @@ class DEExporter:
         due to numerical errors
 
     :ivar compiler:
-        distutils/setuptools compiler selection to build the Python extension
+        Absolute path to the compiler executable to be used to build the
+        Python extension, e.g. ``/usr/bin/clang``.
 
     :ivar functions:
         carries C++ function signatures and other specifications
@@ -2755,8 +2756,8 @@ def __init__(
             used to avoid problems with state variables that may become
             negative due to numerical errors
 
-        :param compiler: distutils/setuptools compiler selection to build the
-            python extension
+        :param compiler: Absolute path to the compiler executable to be used
+            to build the Python extension, e.g. ``/usr/bin/clang``.
 
         :param allow_reinit_fixpar_initcond:
             see :class:`amici.de_export.DEExporter`
@@ -2876,8 +2877,8 @@ def _compile_c_code(
             Make model compilation verbose
 
         :param compiler:
-            distutils/setuptools compiler selection to build the python
-            extension
+            Absolute path to the compiler executable to be used to build the
+            Python extension, e.g. ``/usr/bin/clang``.
         """
         # setup.py assumes it is run from within the model directory
         module_dir = self.model_path
@@ -2900,8 +2901,10 @@ def _compile_c_code(
             ]
         )
 
+        env = os.environ.copy()
         if compiler is not None:
-            script_args.extend([f"--compiler={compiler}"])
+            # CMake will use the compiler specified in the CXX environment variable
+            env["CXX"] = compiler
 
         # distutils.core.run_setup looks nicer, but does not let us check the
         # result easily
@@ -2912,6 +2915,7 @@ def _compile_c_code(
             stdout=subprocess.PIPE,
             stderr=subprocess.STDOUT,
             check=True,
+            env=env,
         )
     except subprocess.CalledProcessError as e:
         print(e.output.decode("utf-8"))
diff --git a/python/sdist/amici/pysb_import.py b/python/sdist/amici/pysb_import.py
index 7e413a2a88..0dbba69ddb 100644
--- a/python/sdist/amici/pysb_import.py
+++ b/python/sdist/amici/pysb_import.py
@@ -112,8 +112,8 @@ def pysb2amici(
         errors
 
     :param compiler:
-        distutils/setuptools compiler selection to build the python
-        extension
+        Absolute path to the compiler executable to be used to build the
+        Python extension, e.g. ``/usr/bin/clang``.
 
     :param compute_conservation_laws:
         if set to ``True``, conservation laws are automatically computed and
diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py
index 8c43d35cf2..98313c3d77 100644
--- a/python/sdist/amici/sbml_import.py
+++ b/python/sdist/amici/sbml_import.py
@@ -365,8 +365,8 @@ def sbml2amici(
         negative due to numerical errors
 
     :param compiler:
-        distutils/setuptools compiler selection to build the
-        python extension
+        Absolute path to the compiler executable to be used to build the
+        Python extension, e.g. ``/usr/bin/clang``.
 
     :param allow_reinit_fixpar_initcond:
         see :class:`amici.de_export.ODEExporter`
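To show the restored `compiler` argument in context: a minimal sketch, assuming a hypothetical SBML file `model.xml` and a clang executable at the given path; per the commit above, the path ends up in the `CXX` environment variable of the CMake invocation:

```python
import amici

# hypothetical input model and output directory
sbml_importer = amici.SbmlImporter("model.xml")
sbml_importer.sbml2amici(
    "my_model",      # module name
    "my_model_dir",  # output directory
    # absolute path to the compiler used to build the extension;
    # forwarded as CXX when CMake is invoked
    compiler="/usr/bin/clang",
)
```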
""" # setup.py assumes it is run from within the model directory module_dir = self.model_path @@ -2900,8 +2901,10 @@ def _compile_c_code( ] ) + env = os.environ.copy() if compiler is not None: - script_args.extend([f"--compiler={compiler}"]) + # CMake will use the compiler specified in the CXX environment variable + env["CXX"] = compiler # distutils.core.run_setup looks nicer, but does not let us check the # result easily @@ -2912,6 +2915,7 @@ def _compile_c_code( stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True, + env=env, ) except subprocess.CalledProcessError as e: print(e.output.decode("utf-8")) diff --git a/python/sdist/amici/pysb_import.py b/python/sdist/amici/pysb_import.py index 7e413a2a88..0dbba69ddb 100644 --- a/python/sdist/amici/pysb_import.py +++ b/python/sdist/amici/pysb_import.py @@ -112,8 +112,8 @@ def pysb2amici( errors :param compiler: - distutils/setuptools compiler selection to build the python - extension + Absolute path to the compiler executable to be used to build the Python + extension, e.g. ``/usr/bin/clang``. :param compute_conservation_laws: if set to ``True``, conservation laws are automatically computed and diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index 8c43d35cf2..98313c3d77 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -365,8 +365,8 @@ def sbml2amici( negative due to numerical errors :param compiler: - distutils/setuptools compiler selection to build the - python extension + Absolute path to the compiler executable to be used to build the Python + extension, e.g. ``/usr/bin/clang``. :param allow_reinit_fixpar_initcond: see :class:`amici.de_export.ODEExporter` From 847a5966cad44a4c3b353d1cdca520b7e83c4da9 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 31 Aug 2023 12:56:36 +0200 Subject: [PATCH 31/32] Fix pre-commit configuration (#2171) Relevant changes are only in .pre-commit-config.yaml * run black on notebooks via pre-commit * set proper line width (pyproject.toml is not used if we are running things from the repo root) * compatible line width for black + isort * fix json error in binder/overview.ipynb * re-blacken everything --- .pre-commit-config.yaml | 8 +- binder/overview.ipynb | 3 +- documentation/ExampleJax.ipynb | 87 +- documentation/GettingStarted.ipynb | 11 +- documentation/conf.py | 29 +- documentation/recreate_reference_list.py | 8 +- python/benchmark/benchmark_pysb.py | 20 +- .../ExampleEquilibrationLogic.ipynb | 289 ++++--- python/examples/example_errors.ipynb | 281 +++++-- python/examples/example_jax/ExampleJax.ipynb | 109 ++- .../example_performance_optimization.ipynb | 31 +- python/examples/example_petab/petab.ipynb | 19 +- .../ExampleExperimentalConditions.ipynb | 90 ++- .../createModelPresimulation.py | 13 +- .../example_splines/ExampleSplines.ipynb | 234 ++++-- .../ExampleSplinesSwameye2003.ipynb | 758 ++++++++++++++---- .../ExampleSteadystate.ipynb | 260 +++--- python/sdist/amici/__init__.py | 10 +- python/sdist/amici/__main__.py | 4 +- .../amici/conserved_quantities_demartino.py | 80 +- python/sdist/amici/custom_commands.py | 3 +- python/sdist/amici/cxxcodeprinter.py | 37 +- python/sdist/amici/de_export.py | 298 +++++-- python/sdist/amici/de_model.py | 26 +- python/sdist/amici/gradient_check.py | 29 +- python/sdist/amici/import_utils.py | 47 +- python/sdist/amici/logging.py | 6 +- python/sdist/amici/numpy.py | 26 +- python/sdist/amici/pandas.py | 47 +- python/sdist/amici/parameter_mapping.py | 37 +- 
python/sdist/amici/petab_import.py | 89 +- python/sdist/amici/petab_import_pysb.py | 28 +- python/sdist/amici/petab_objective.py | 140 +++- python/sdist/amici/petab_simulate.py | 11 +- python/sdist/amici/petab_util.py | 12 +- python/sdist/amici/pysb_import.py | 168 +++- python/sdist/amici/sbml_import.py | 273 +++++-- python/sdist/amici/sbml_utils.py | 8 +- python/sdist/amici/splines.py | 187 +++-- python/sdist/amici/swig.py | 20 +- python/sdist/amici/swig_wrappers.py | 14 +- python/sdist/pyproject.toml | 2 +- python/tests/conftest.py | 4 +- .../bngwiki_egfr_simple_deletemolecules.py | 4 +- python/tests/splines_utils.py | 57 +- .../test_compare_conservation_laws_sbml.py | 26 +- .../test_conserved_quantities_demartino.py | 35 +- python/tests/test_edata.py | 4 +- python/tests/test_events.py | 46 +- python/tests/test_hdf5.py | 4 +- python/tests/test_heavisides.py | 38 +- python/tests/test_misc.py | 16 +- python/tests/test_observable_events.py | 8 +- python/tests/test_pandas.py | 8 +- python/tests/test_parameter_mapping.py | 26 +- python/tests/test_petab_import.py | 8 +- python/tests/test_petab_objective.py | 4 +- python/tests/test_petab_simulate.py | 4 +- python/tests/test_preequilibration.py | 51 +- python/tests/test_pregenerated_models.py | 37 +- python/tests/test_pysb.py | 19 +- python/tests/test_rdata.py | 4 +- python/tests/test_sbml_import.py | 53 +- .../test_sbml_import_special_functions.py | 19 +- python/tests/test_splines.py | 7 +- python/tests/test_splines_python.py | 61 +- python/tests/test_splines_short.py | 4 +- python/tests/test_swig_interface.py | 28 +- python/tests/util.py | 20 +- tests/benchmark-models/evaluate_benchmark.py | 16 +- .../benchmark-models/test_petab_benchmark.py | 18 +- tests/benchmark-models/test_petab_model.py | 24 +- tests/conftest.py | 21 +- tests/generateTestConfig/example.py | 13 +- tests/generateTestConfig/example_events.py | 8 +- tests/generateTestConfig/example_jakstat.py | 16 +- .../example_nested_events.py | 8 +- tests/generateTestConfig/example_neuron.py | 4 +- tests/performance/test.py | 8 +- tests/petab_test_suite/conftest.py | 13 +- tests/petab_test_suite/test_petab_suite.py | 37 +- tests/testSBMLSuite.py | 43 +- 82 files changed, 3426 insertions(+), 1250 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e0ea39c7c2..521ff54f85 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,7 @@ repos: hooks: - id: isort name: isort (python) - args: ["--profile", "black", "--filter-files"] + args: ["--profile", "black", "--filter-files", "--line-length", "79"] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: @@ -17,12 +17,14 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/psf/black - rev: 23.3.0 + rev: 23.7.0 hooks: - - id: black + - id: black-jupyter # It is recommended to specify the latest version of Python # supported by your project here, or alternatively use # pre-commit's default_language_version, see # https://pre-commit.com/#top_level-default_language_version language_version: python3.11 + args: ["--line-length", "79"] + exclude: '^(ThirdParty|models)/' diff --git a/binder/overview.ipynb b/binder/overview.ipynb index 9c4959f372..6d98f0f9fb 100644 --- a/binder/overview.ipynb +++ b/binder/overview.ipynb @@ -34,7 +34,8 @@ "\n", "* [Interfacing JAX](../python/examples/example_jax/ExampleJax.ipynb)\n", "\n", - " Provides guidance on how to combine AMICI with differential programming frameworks such as JAX.\n" + " Provides guidance on how 
to combine AMICI with differential programming frameworks such as JAX.\n", + "\n", "* [Efficient spline interpolation](../python/examples/example_splines/ExampleSplines.ipynb)\n", "\n", " Shows how to add annotated spline formulas to existing SBML models in order to speed up AMICI's model import.\n", diff --git a/documentation/ExampleJax.ipynb b/documentation/ExampleJax.ipynb index f572b14384..1899305b67 100644 --- a/documentation/ExampleJax.ipynb +++ b/documentation/ExampleJax.ipynb @@ -46,10 +46,10 @@ "output_type": "stream", "text": [ "Cloning into 'tmp/benchmark-models'...\n", - "remote: Enumerating objects: 336, done.\u001B[K\n", - "remote: Counting objects: 100% (336/336), done.\u001B[K\n", - "remote: Compressing objects: 100% (285/285), done.\u001B[K\n", - "remote: Total 336 (delta 88), reused 216 (delta 39), pack-reused 0\u001B[K\n", + "remote: Enumerating objects: 336, done.\u001b[K\n", + "remote: Counting objects: 100% (336/336), done.\u001b[K\n", + "remote: Compressing objects: 100% (285/285), done.\u001b[K\n", + "remote: Total 336 (delta 88), reused 216 (delta 39), pack-reused 0\u001b[K\n", "Receiving objects: 100% (336/336), 2.11 MiB | 7.48 MiB/s, done.\n", "Resolving deltas: 100% (88/88), done.\n" ] @@ -58,7 +58,8 @@ "source": [ "!git clone --depth 1 https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git tmp/benchmark-models || (cd tmp/benchmark-models && git pull)\n", "from pathlib import Path\n", - "folder_base = Path('.') / \"tmp\" / \"benchmark-models\" / \"Benchmark-Models\"" + "\n", + "folder_base = Path(\".\") / \"tmp\" / \"benchmark-models\" / \"Benchmark-Models\"" ] }, { @@ -77,6 +78,7 @@ "outputs": [], "source": [ "import petab\n", + "\n", "model_name = \"Boehm_JProteomeRes2014\"\n", "yaml_file = folder_base / model_name / (model_name + \".yaml\")\n", "petab_problem = petab.Problem.from_yaml(yaml_file)" @@ -570,6 +572,7 @@ ], "source": [ "from amici.petab_import import import_petab_problem\n", + "\n", "amici_model = import_petab_problem(petab_problem, force_compile=True)" ] }, @@ -606,14 +609,16 @@ "source": [ "from amici.petab_objective import simulate_petab\n", "import amici\n", + "\n", "amici_solver = amici_model.getSolver()\n", "amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n", "\n", + "\n", "def amici_hcb_base(parameters: jnp.array):\n", " return simulate_petab(\n", - " petab_problem, \n", - " amici_model, \n", - " problem_parameters=dict(zip(petab_problem.x_free_ids, parameters)), \n", + " petab_problem,\n", + " amici_model,\n", + " problem_parameters=dict(zip(petab_problem.x_free_ids, parameters)),\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", " )" @@ -635,13 +640,14 @@ "outputs": [], "source": [ "def amici_hcb_llh(parameters: jnp.array):\n", - " return amici_hcb_base(parameters)['llh']\n", + " return amici_hcb_base(parameters)[\"llh\"]\n", + "\n", "\n", "def amici_hcb_sllh(parameters: jnp.array):\n", - " sllh = amici_hcb_base(parameters)['sllh']\n", - " return jnp.asarray(tuple(\n", - " sllh[par_id] for par_id in petab_problem.x_free_ids\n", - " ))" + " sllh = amici_hcb_base(parameters)[\"sllh\"]\n", + " return jnp.asarray(\n", + " tuple(sllh[par_id] for par_id in petab_problem.x_free_ids)\n", + " )" ] }, { @@ -663,6 +669,8 @@ "from jax import custom_jvp\n", "\n", "import numpy as np\n", + "\n", + "\n", "@custom_jvp\n", "def jax_objective(parameters: jnp.array):\n", " return hcb.call(\n", @@ -695,7 +703,9 @@ " sllh = hcb.call(\n", " amici_hcb_sllh,\n", " parameters,\n", - " 
result_shape=jax.ShapeDtypeStruct((petab_problem.parameter_df.estimate.sum(),), np.float64),\n", + " result_shape=jax.ShapeDtypeStruct(\n", + " (petab_problem.parameter_df.estimate.sum(),), np.float64\n", + " ),\n", " )\n", " return llh, sllh.dot(x_dot)" ] @@ -717,19 +727,25 @@ "source": [ "from jax import value_and_grad\n", "\n", - "parameter_scales = petab_problem.parameter_df.loc[petab_problem.x_free_ids, petab.PARAMETER_SCALE].values\n", + "parameter_scales = petab_problem.parameter_df.loc[\n", + " petab_problem.x_free_ids, petab.PARAMETER_SCALE\n", + "].values\n", + "\n", "\n", "@jax.jit\n", "@value_and_grad\n", "def jax_objective_with_parameter_transform(parameters: jnp.array):\n", - " par_scaled = jnp.asarray(tuple(\n", - " value if scale == petab.LIN\n", - " else jnp.log(value) if scale == petab.LOG\n", - " else jnp.log10(value)\n", - " for value, scale in zip(parameters, parameter_scales)\n", - " ))\n", - " return jax_objective(par_scaled)\n", - " " + " par_scaled = jnp.asarray(\n", + " tuple(\n", + " value\n", + " if scale == petab.LIN\n", + " else jnp.log(value)\n", + " if scale == petab.LOG\n", + " else jnp.log10(value)\n", + " for value, scale in zip(parameters, parameter_scales)\n", + " )\n", + " )\n", + " return jax_objective(par_scaled)" ] }, { @@ -755,7 +771,9 @@ "metadata": {}, "outputs": [], "source": [ - "llh_jax, sllh_jax = jax_objective_with_parameter_transform(petab_problem.x_nominal_free)" + "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n", + " petab_problem.x_nominal_free\n", + ")" ] }, { @@ -777,7 +795,9 @@ "# TODO remove me as soon as sllh in simulate_petab is fixed\n", "sllh = {\n", " name: value / (np.log(10) * par_value)\n", - " for (name, value), par_value in zip(r['sllh'].items(), petab_problem.x_nominal_free)\n", + " for (name, value), par_value in zip(\n", + " r[\"sllh\"].items(), petab_problem.x_nominal_free\n", + " )\n", "}" ] }, @@ -802,7 +822,8 @@ ], "source": [ "import pandas as pd\n", - "pd.Series(dict(amici=r['llh'], jax=float(llh_jax)))" + "\n", + "pd.Series(dict(amici=r[\"llh\"], jax=float(llh_jax)))" ] }, { @@ -905,7 +926,9 @@ } ], "source": [ - "pd.DataFrame(index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax)))" + "pd.DataFrame(\n", + " index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax))\n", + ")" ] }, { @@ -925,7 +948,9 @@ "outputs": [], "source": [ "jax.config.update(\"jax_enable_x64\", True)\n", - "llh_jax, sllh_jax = jax_objective_with_parameter_transform(petab_problem.x_nominal_free)" + "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n", + " petab_problem.x_nominal_free\n", + ")" ] }, { @@ -956,7 +981,7 @@ } ], "source": [ - "pd.Series(dict(amici=r['llh'], jax=float(llh_jax)))" + "pd.Series(dict(amici=r[\"llh\"], jax=float(llh_jax)))" ] }, { @@ -1059,7 +1084,9 @@ } ], "source": [ - "pd.DataFrame(index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax)))" + "pd.DataFrame(\n", + " index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax))\n", + ")" ] } ], diff --git a/documentation/GettingStarted.ipynb b/documentation/GettingStarted.ipynb index 33258424b9..91fb9cb12c 100644 --- a/documentation/GettingStarted.ipynb +++ b/documentation/GettingStarted.ipynb @@ -26,7 +26,8 @@ "outputs": [], "source": [ "import amici\n", - "sbml_importer = amici.SbmlImporter('model_steadystate_scaled.xml')" + "\n", + "sbml_importer = amici.SbmlImporter(\"model_steadystate_scaled.xml\")" ] }, { @@ -42,8 +43,8 @@ "metadata": {}, "outputs": [], "source": [ - 
"model_name = 'model_steadystate'\n", - "model_dir = 'model_dir'\n", + "model_name = \"model_steadystate\"\n", + "model_dir = \"model_dir\"\n", "sbml_importer.sbml2amici(model_name, model_dir)" ] }, @@ -82,7 +83,7 @@ "metadata": {}, "outputs": [], "source": [ - "model.setParameterByName('p1',1e-3)" + "model.setParameterByName(\"p1\", 1e-3)" ] }, { @@ -122,7 +123,7 @@ "outputs": [], "source": [ "# set timepoints\n", - "model.setTimepoints([0,1])\n", + "model.setTimepoints([0, 1])\n", "rdata = amici.runAmiciSimulation(model, solver)" ] }, diff --git a/documentation/conf.py b/documentation/conf.py index 96209e4c31..ba88b25a8d 100644 --- a/documentation/conf.py +++ b/documentation/conf.py @@ -52,7 +52,9 @@ def my_exhale_generate_doxygen(doxygen_input): DomainDirectiveFactory as breathe_DomainDirectiveFactory, ) -old_breathe_DomainDirectiveFactory_create = breathe_DomainDirectiveFactory.create +old_breathe_DomainDirectiveFactory_create = ( + breathe_DomainDirectiveFactory.create +) def my_breathe_DomainDirectiveFactory_create(domain: str, args): @@ -67,7 +69,9 @@ def my_breathe_DomainDirectiveFactory_create(domain: str, args): return cls(domain + ":" + name, *args[1:]) -breathe_DomainDirectiveFactory.create = my_breathe_DomainDirectiveFactory_create +breathe_DomainDirectiveFactory.create = ( + my_breathe_DomainDirectiveFactory_create +) # END Monkeypatch breathe @@ -102,7 +106,9 @@ def install_doxygen(): subprocess.run(cmd, shell=True, check=True) assert os.path.islink(os.path.join(some_dir_on_path, "doxygen")) # verify it's available - res = subprocess.run(["doxygen", "--version"], check=False, capture_output=True) + res = subprocess.run( + ["doxygen", "--version"], check=False, capture_output=True + ) print(res.stdout.decode(), res.stderr.decode()) assert version in res.stdout.decode() @@ -176,7 +182,10 @@ def install_doxygen(): intersphinx_mapping = { "pysb": ("https://pysb.readthedocs.io/en/stable/", None), - "petab": ("https://petab.readthedocs.io/projects/libpetab-python/en/latest/", None), + "petab": ( + "https://petab.readthedocs.io/projects/libpetab-python/en/latest/", + None, + ), "pandas": ("https://pandas.pydata.org/docs/", None), "numpy": ("https://numpy.org/devdocs/", None), "sympy": ("https://docs.sympy.org/latest/", None), @@ -291,7 +300,9 @@ def install_doxygen(): "verboseBuild": True, } -mtocpp_filter = os.path.join(amici_dir, "matlab", "mtoc", "config", "mtocpp_filter.sh") +mtocpp_filter = os.path.join( + amici_dir, "matlab", "mtoc", "config", "mtocpp_filter.sh" +) exhale_projects_args = { "AMICI_CPP": { "exhaleDoxygenStdin": "\n".join( @@ -504,10 +515,14 @@ def process_docstring(app, what, name, obj, options, lines): for old, new in typemaps.items(): lines[i] = lines[i].replace(old, new) lines[i] = re.sub( - r"amici::(Model|Solver|ExpData) ", r":class:`amici\.amici\.\1\`", lines[i] + r"amici::(Model|Solver|ExpData) ", + r":class:`amici\.amici\.\1\`", + lines[i], ) lines[i] = re.sub( - r"amici::(runAmiciSimulation[s]?)", r":func:`amici\.amici\.\1`", lines[i] + r"amici::(runAmiciSimulation[s]?)", + r":func:`amici\.amici\.\1`", + lines[i], ) diff --git a/documentation/recreate_reference_list.py b/documentation/recreate_reference_list.py index 1dd1c13b4b..034c884c4b 100755 --- a/documentation/recreate_reference_list.py +++ b/documentation/recreate_reference_list.py @@ -42,7 +42,10 @@ def get_sub_bibliography(year, by_year, bibfile): entries = ",".join(["@" + x for x in by_year[year]]) stdin_input = ( - "---\n" f"bibliography: {bibfile}\n" f'nocite: "{entries}"\n...\n' f"# 
{year}" + "---\n" + f"bibliography: {bibfile}\n" + f'nocite: "{entries}"\n...\n' + f"# {year}" ) out = subprocess.run( @@ -67,7 +70,8 @@ def main(): with open(outfile, "w") as f: f.write("# References\n\n") f.write( - "List of publications using AMICI. " f"Total number is {num_total}.\n\n" + "List of publications using AMICI. " + f"Total number is {num_total}.\n\n" ) f.write( "If you applied AMICI in your work and your publication is " diff --git a/python/benchmark/benchmark_pysb.py b/python/benchmark/benchmark_pysb.py index e3b505300e..079041ed4d 100644 --- a/python/benchmark/benchmark_pysb.py +++ b/python/benchmark/benchmark_pysb.py @@ -33,7 +33,9 @@ with amici.add_path(os.path.dirname(pysb.examples.__file__)): with amici.add_path( - os.path.join(os.path.dirname(__file__), "..", "tests", "pysb_test_models") + os.path.join( + os.path.dirname(__file__), "..", "tests", "pysb_test_models" + ) ): pysb.SelfExporter.cleanup() # reset pysb pysb.SelfExporter.do_export = True @@ -52,9 +54,9 @@ integrator_options={"rtol": rtol, "atol": atol}, ) time_pysb = ( - timeit.Timer("pysb_simres = sim.run()", globals={"sim": sim}).timeit( - number=N_REPEATS - ) + timeit.Timer( + "pysb_simres = sim.run()", globals={"sim": sim} + ).timeit(number=N_REPEATS) / N_REPEATS ) @@ -76,7 +78,9 @@ observables=list(pysb_model.observables.keys()), ) - amici_model_module = amici.import_model_module(pysb_model.name, outdir) + amici_model_module = amici.import_model_module( + pysb_model.name, outdir + ) model_pysb = amici_model_module.getModel() @@ -89,7 +93,11 @@ time_amici = ( timeit.Timer( "rdata = amici.runAmiciSimulation(model, solver)", - globals={"model": model_pysb, "solver": solver, "amici": amici}, + globals={ + "model": model_pysb, + "solver": solver, + "amici": amici, + }, ).timeit(number=N_REPEATS) / N_REPEATS ) diff --git a/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb b/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb index 7c8ceec6cd..5f66ea4db9 100644 --- a/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb +++ b/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb @@ -60,7 +60,10 @@ ], "source": [ "from IPython.display import Image\n", - "fig = Image(filename=('../../../documentation/gfx/steadystate_solver_workflow.png'))\n", + "\n", + "fig = Image(\n", + " filename=(\"../../../documentation/gfx/steadystate_solver_workflow.png\")\n", + ")\n", "fig" ] }, @@ -102,11 +105,11 @@ "import matplotlib.pyplot as plt\n", "\n", "# SBML model we want to import\n", - "sbml_file = 'model_constant_species.xml'\n", + "sbml_file = \"model_constant_species.xml\"\n", "\n", "# Name of the models that will also be the name of the python module\n", - "model_name = 'model_constant_species'\n", - "model_reduced_name = model_name + '_reduced'\n", + "model_name = \"model_constant_species\"\n", + "model_reduced_name = model_name + \"_reduced\"\n", "\n", "# Directories to which the generated model code is written\n", "model_output_dir = model_name\n", @@ -118,18 +121,41 @@ "sbml_model = sbml_doc.getModel()\n", "dir(sbml_doc)\n", "\n", - "print('Species: ', [s.getId() for s in sbml_model.getListOfSpecies()])\n", + "print(\"Species: \", [s.getId() for s in sbml_model.getListOfSpecies()])\n", "\n", - "print('\\nReactions:')\n", + "print(\"\\nReactions:\")\n", "for reaction in sbml_model.getListOfReactions():\n", - " reactants = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in 
reaction.getListOfReactants()])\n", - " products = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfProducts()])\n", - " reversible = '<' if reaction.getReversible() else ''\n", - " print('%3s: %10s %1s->%10s\\t\\t[%s]' % (reaction.getId(),\n", - " reactants,\n", - " reversible,\n", - " products,\n", - " libsbml.formulaToL3String(reaction.getKineticLaw().getMath())))" + " reactants = \" + \".join(\n", + " [\n", + " \"%s %s\"\n", + " % (\n", + " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n", + " r.getSpecies(),\n", + " )\n", + " for r in reaction.getListOfReactants()\n", + " ]\n", + " )\n", + " products = \" + \".join(\n", + " [\n", + " \"%s %s\"\n", + " % (\n", + " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n", + " r.getSpecies(),\n", + " )\n", + " for r in reaction.getListOfProducts()\n", + " ]\n", + " )\n", + " reversible = \"<\" if reaction.getReversible() else \"\"\n", + " print(\n", + " \"%3s: %10s %1s->%10s\\t\\t[%s]\"\n", + " % (\n", + " reaction.getId(),\n", + " reactants,\n", + " reversible,\n", + " products,\n", + " libsbml.formulaToL3String(reaction.getKineticLaw().getMath()),\n", + " )\n", + " )" ] }, { @@ -142,25 +168,29 @@ "sbml_importer = amici.SbmlImporter(sbml_file)\n", "\n", "# specify observables and constant parameters\n", - "constantParameters = ['synthesis_substrate', 'init_enzyme']\n", + "constantParameters = [\"synthesis_substrate\", \"init_enzyme\"]\n", "observables = {\n", - " 'observable_product': {'name': '', 'formula': 'product'},\n", - " 'observable_substrate': {'name': '', 'formula': 'substrate'},\n", + " \"observable_product\": {\"name\": \"\", \"formula\": \"product\"},\n", + " \"observable_substrate\": {\"name\": \"\", \"formula\": \"substrate\"},\n", "}\n", - "sigmas = {'observable_product': 1.0, 'observable_substrate': 1.0}\n", + "sigmas = {\"observable_product\": 1.0, \"observable_substrate\": 1.0}\n", "\n", "# import the model\n", - "sbml_importer.sbml2amici(model_reduced_name,\n", - " model_reduced_output_dir,\n", - " observables=observables,\n", - " constant_parameters=constantParameters,\n", - " sigmas=sigmas)\n", - "sbml_importer.sbml2amici(model_name,\n", - " model_output_dir,\n", - " observables=observables,\n", - " constant_parameters=constantParameters,\n", - " sigmas=sigmas,\n", - " compute_conservation_laws=False)" + "sbml_importer.sbml2amici(\n", + " model_reduced_name,\n", + " model_reduced_output_dir,\n", + " observables=observables,\n", + " constant_parameters=constantParameters,\n", + " sigmas=sigmas,\n", + ")\n", + "sbml_importer.sbml2amici(\n", + " model_name,\n", + " model_output_dir,\n", + " observables=observables,\n", + " constant_parameters=constantParameters,\n", + " sigmas=sigmas,\n", + " compute_conservation_laws=False,\n", + ")" ] }, { @@ -219,10 +249,14 @@ ], "source": [ "# import the models and run some test simulations\n", - "model_reduced_module = amici.import_model_module(model_reduced_name, os.path.abspath(model_reduced_output_dir))\n", + "model_reduced_module = amici.import_model_module(\n", + " model_reduced_name, os.path.abspath(model_reduced_output_dir)\n", + ")\n", "model_reduced = model_reduced_module.getModel()\n", "\n", - "model_module = amici.import_model_module(model_name, os.path.abspath(model_output_dir))\n", + "model_module = amici.import_model_module(\n", + " model_name, os.path.abspath(model_output_dir)\n", + ")\n", "model = model_module.getModel()\n", "\n", "\n", @@ -238,6 +272,7 @@ 
"\n", "# plot trajectories\n", "import amici.plotting\n", + "\n", "amici.plotting.plotStateTrajectories(rdata_reduced, model=model_reduced)\n", "amici.plotting.plotObservableTrajectories(rdata_reduced, model=model_reduced)\n", "\n", @@ -336,9 +371,9 @@ "solver.setMaxSteps(1000)\n", "rdata = amici.runAmiciSimulation(model, solver)\n", "\n", - "#np.set_printoptions(threshold=8, edgeitems=2)\n", + "# np.set_printoptions(threshold=8, edgeitems=2)\n", "for key, value in rdata.items():\n", - " print('%12s: ' % key, value)" + " print(\"%12s: \" % key, value)" ] }, { @@ -399,8 +434,10 @@ "# reduce maxsteps for integration\n", "solver.setMaxSteps(100)\n", "rdata = amici.runAmiciSimulation(model, solver)\n", - "print('Status of postequilibration:', rdata['posteq_status'])\n", - "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])" + "print(\"Status of postequilibration:\", rdata[\"posteq_status\"])\n", + "print(\n", + " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n", + ")" ] }, { @@ -437,8 +474,11 @@ "solver_reduced.setMaxSteps(100)\n", "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced)\n", "\n", - "print('Status of postequilibration:', rdata_reduced['posteq_status'])\n", - "print('Number of steps employed in postequilibration:', rdata_reduced['posteq_numsteps'])" + "print(\"Status of postequilibration:\", rdata_reduced[\"posteq_status\"])\n", + "print(\n", + " \"Number of steps employed in postequilibration:\",\n", + " rdata_reduced[\"posteq_numsteps\"],\n", + ")" ] }, { @@ -498,7 +538,9 @@ "source": [ "# Call simulation with singular Jacobian and integrateIfNewtonFails mode\n", "model.setTimepoints(np.full(1, np.inf))\n", - "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrateIfNewtonFails)\n", + "model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n", + ")\n", "solver = model.getSolver()\n", "solver.setNewtonMaxSteps(10)\n", "solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n", @@ -506,10 +548,12 @@ "solver.setMaxSteps(10000)\n", "rdata = amici.runAmiciSimulation(model, solver)\n", "\n", - "print('Status of postequilibration:', rdata['posteq_status'])\n", - "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])\n", - "print('Computed state sensitivities:')\n", - "print(rdata['sx'][0,:,:])" + "print(\"Status of postequilibration:\", rdata[\"posteq_status\"])\n", + "print(\n", + " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n", + ")\n", + "print(\"Computed state sensitivities:\")\n", + "print(rdata[\"sx\"][0, :, :])" ] }, { @@ -550,17 +594,21 @@ "source": [ "# Call simulation with singular Jacobian and newtonOnly mode (will fail)\n", "model.setTimepoints(np.full(1, np.inf))\n", - "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n", + "model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.newtonOnly\n", + ")\n", "solver = model.getSolver()\n", "solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n", "solver.setSensitivityOrder(amici.SensitivityOrder.first)\n", "solver.setMaxSteps(10000)\n", "rdata = amici.runAmiciSimulation(model, solver)\n", "\n", - "print('Status of postequilibration:', rdata['posteq_status'])\n", - "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])\n", - "print('Computed state sensitivities:')\n", - "print(rdata['sx'][0,:,:])" + "print(\"Status of 
postequilibration:\", rdata[\"posteq_status\"])\n", + "print(\n", + " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n", + ")\n", + "print(\"Computed state sensitivities:\")\n", + "print(rdata[\"sx\"][0, :, :])" ] }, { @@ -586,7 +634,9 @@ "source": [ "# Call postequilibration by setting an infinity timepoint\n", "model_reduced.setTimepoints(np.full(1, np.inf))\n", - "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n", + "model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.newtonOnly\n", + ")\n", "solver_reduced = model_reduced.getSolver()\n", "solver_reduced.setNewtonMaxSteps(10)\n", "solver_reduced.setSensitivityMethod(amici.SensitivityMethod.forward)\n", @@ -594,10 +644,13 @@ "solver_reduced.setMaxSteps(1000)\n", "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced)\n", "\n", - "print('Status of postequilibration:', rdata_reduced['posteq_status'])\n", - "print('Number of steps employed in postequilibration:', rdata_reduced['posteq_numsteps'])\n", - "print('Computed state sensitivities:')\n", - "print(rdata_reduced['sx'][0,:,:])" + "print(\"Status of postequilibration:\", rdata_reduced[\"posteq_status\"])\n", + "print(\n", + " \"Number of steps employed in postequilibration:\",\n", + " rdata_reduced[\"posteq_numsteps\"],\n", + ")\n", + "print(\"Computed state sensitivities:\")\n", + "print(rdata_reduced[\"sx\"][0, :, :])" ] }, { @@ -646,11 +699,13 @@ "source": [ "# Call adjoint postequilibration by setting an infinity timepoint\n", "# and create an edata object, which is needed for adjoint computation\n", - "edata = amici.ExpData(2, 0, 0, np.array([float('inf')]))\n", + "edata = amici.ExpData(2, 0, 0, np.array([float(\"inf\")]))\n", "edata.setObservedData([1.8] * 2)\n", - "edata.fixedParameters = np.array([3., 5.])\n", + "edata.fixedParameters = np.array([3.0, 5.0])\n", "\n", - "model_reduced.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n", + "model_reduced.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.newtonOnly\n", + ")\n", "solver_reduced = model_reduced.getSolver()\n", "solver_reduced.setNewtonMaxSteps(10)\n", "solver_reduced.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n", @@ -658,10 +713,16 @@ "solver_reduced.setMaxSteps(1000)\n", "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", "\n", - "print('Status of postequilibration:', rdata_reduced['posteq_status'])\n", - "print('Number of steps employed in postequilibration:', rdata_reduced['posteq_numsteps'])\n", - "print('Number of backward steps employed in postequilibration:', rdata_reduced['posteq_numstepsB'])\n", - "print('Computed gradient:', rdata_reduced['sllh'])" + "print(\"Status of postequilibration:\", rdata_reduced[\"posteq_status\"])\n", + "print(\n", + " \"Number of steps employed in postequilibration:\",\n", + " rdata_reduced[\"posteq_numsteps\"],\n", + ")\n", + "print(\n", + " \"Number of backward steps employed in postequilibration:\",\n", + " rdata_reduced[\"posteq_numstepsB\"],\n", + ")\n", + "print(\"Computed gradient:\", rdata_reduced[\"sllh\"])" ] }, { @@ -691,17 +752,24 @@ ], "source": [ "# Call adjoint postequilibration with model with singular Jacobian\n", - "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n", + "model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.newtonOnly\n", + ")\n", "solver = model.getSolver()\n", 
"solver.setNewtonMaxSteps(10)\n", "solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n", "solver.setSensitivityOrder(amici.SensitivityOrder.first)\n", "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "\n", - "print('Status of postequilibration:', rdata['posteq_status'])\n", - "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])\n", - "print('Number of backward steps employed in postequilibration:', rdata['posteq_numstepsB'])\n", - "print('Computed gradient:', rdata['sllh'])" + "print(\"Status of postequilibration:\", rdata[\"posteq_status\"])\n", + "print(\n", + " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n", + ")\n", + "print(\n", + " \"Number of backward steps employed in postequilibration:\",\n", + " rdata[\"posteq_numstepsB\"],\n", + ")\n", + "print(\"Computed gradient:\", rdata[\"sllh\"])" ] }, { @@ -720,11 +788,10 @@ "outputs": [], "source": [ "# create edata, with 3 timepoints and 2 observables:\n", - "edata = amici.ExpData(2, 0, 0,\n", - " np.array([0., 0.1, 1.]))\n", + "edata = amici.ExpData(2, 0, 0, np.array([0.0, 0.1, 1.0]))\n", "edata.setObservedData([1.8] * 6)\n", - "edata.fixedParameters = np.array([3., 5.])\n", - "edata.fixedParametersPreequilibration = np.array([0., 2.])\n", + "edata.fixedParameters = np.array([3.0, 5.0])\n", + "edata.fixedParametersPreequilibration = np.array([0.0, 2.0])\n", "edata.reinitializeFixedParameterInitialStates = True" ] }, @@ -764,8 +831,8 @@ "solver_reduced.setNewtonMaxSteps(10)\n", "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", "\n", - "amici.plotting.plotStateTrajectories(rdata_reduced, model = model_reduced)\n", - "amici.plotting.plotObservableTrajectories(rdata_reduced, model = model_reduced)" + "amici.plotting.plotStateTrajectories(rdata_reduced, model=model_reduced)\n", + "amici.plotting.plotObservableTrajectories(rdata_reduced, model=model_reduced)" ] }, { @@ -782,7 +849,7 @@ "outputs": [], "source": [ "# Change the last timepoint to an infinity timepoint.\n", - "edata.setTimepoints(np.array([0., 0.1, float('inf')]))\n", + "edata.setTimepoints(np.array([0.0, 0.1, float(\"inf\")]))\n", "\n", "# run the simulation\n", "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)" @@ -844,10 +911,12 @@ ], "source": [ "# No postquilibration this time.\n", - "edata.setTimepoints(np.array([0., 0.1, 1.]))\n", + "edata.setTimepoints(np.array([0.0, 0.1, 1.0]))\n", "\n", "# create the solver object and run the simulation, singular Jacobian, enforce Newton solver for sensitivities\n", - "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n", + "model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.newtonOnly\n", + ")\n", "solver = model.getSolver()\n", "solver.setNewtonMaxSteps(10)\n", "solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n", @@ -855,8 +924,8 @@ "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "\n", "for key, value in rdata.items():\n", - " if key[0:6] == 'preeq_':\n", - " print('%20s: ' % key, value)" + " if key[0:6] == \"preeq_\":\n", + " print(\"%20s: \" % key, value)" ] }, { @@ -883,7 +952,9 @@ ], "source": [ "# Singluar Jacobian, use simulation\n", - "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrateIfNewtonFails)\n", + "model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n", + ")\n", "solver = model.getSolver()\n", 
"solver.setNewtonMaxSteps(10)\n", "solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n", @@ -891,8 +962,8 @@ "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "\n", "for key, value in rdata.items():\n", - " if key[0:6] == 'preeq_':\n", - " print('%20s: ' % key, value)" + " if key[0:6] == \"preeq_\":\n", + " print(\"%20s: \" % key, value)" ] }, { @@ -924,8 +995,8 @@ "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", "\n", "for key, value in rdata_reduced.items():\n", - " if key[0:6] == 'preeq_':\n", - " print('%20s: ' % key, value)" + " if key[0:6] == \"preeq_\":\n", + " print(\"%20s: \" % key, value)" ] }, { @@ -975,9 +1046,9 @@ "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", "\n", "for key, value in rdata_reduced.items():\n", - " if key[0:6] == 'preeq_':\n", - " print('%20s: ' % key, value)\n", - "print('Gradient:', rdata_reduced['sllh'])" + " if key[0:6] == \"preeq_\":\n", + " print(\"%20s: \" % key, value)\n", + "print(\"Gradient:\", rdata_reduced[\"sllh\"])" ] }, { @@ -1010,9 +1081,9 @@ "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", "\n", "for key, value in rdata_reduced.items():\n", - " if key[0:6] == 'preeq_':\n", - " print('%20s: ' % key, value)\n", - "print('Gradient:', rdata_reduced['sllh'])" + " if key[0:6] == \"preeq_\":\n", + " print(\"%20s: \" % key, value)\n", + "print(\"Gradient:\", rdata_reduced[\"sllh\"])" ] }, { @@ -1041,14 +1112,16 @@ "solver_reduced = model_reduced.getSolver()\n", "solver_reduced.setNewtonMaxSteps(10)\n", "solver_reduced.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n", - "solver_reduced.setSensitivityMethodPreequilibration(amici.SensitivityMethod.adjoint)\n", + "solver_reduced.setSensitivityMethodPreequilibration(\n", + " amici.SensitivityMethod.adjoint\n", + ")\n", "solver_reduced.setSensitivityOrder(amici.SensitivityOrder.first)\n", "rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", "\n", "for key, value in rdata_reduced.items():\n", - " if key[0:6] == 'preeq_':\n", - " print('%20s: ' % key, value)\n", - "print('Gradient:', rdata_reduced['sllh'])" + " if key[0:6] == \"preeq_\":\n", + " print(\"%20s: \" % key, value)\n", + "print(\"Gradient:\", rdata_reduced[\"sllh\"])" ] }, { @@ -1089,9 +1162,9 @@ "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "\n", "for key, value in rdata.items():\n", - " if key[0:6] == 'preeq_':\n", - " print('%20s: ' % key, value)\n", - "print('Gradient:', rdata['sllh'])" + " if key[0:6] == \"preeq_\":\n", + " print(\"%20s: \" % key, value)\n", + "print(\"Gradient:\", rdata[\"sllh\"])" ] }, { @@ -1135,7 +1208,9 @@ ], "source": [ "# Non-singular Jacobian, use simulaiton\n", - "model_reduced.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrateIfNewtonFails)\n", + "model_reduced.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n", + ")\n", "solver_reduced = model_reduced.getSolver()\n", "solver_reduced.setNewtonMaxSteps(0)\n", "solver_reduced.setSensitivityMethod(amici.SensitivityMethod.forward)\n", @@ -1146,27 +1221,31 @@ "solver_reduced.setAbsoluteToleranceSteadyState(1e-3)\n", "solver_reduced.setRelativeToleranceSteadyStateSensi(1e-2)\n", "solver_reduced.setAbsoluteToleranceSteadyStateSensi(1e-3)\n", - "rdata_reduced_lax = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", + "rdata_reduced_lax = amici.runAmiciSimulation(\n", + " model_reduced, solver_reduced, edata\n", + 
")\n", "\n", "# run with strict tolerances\n", "solver_reduced.setRelativeToleranceSteadyState(1e-12)\n", "solver_reduced.setAbsoluteToleranceSteadyState(1e-16)\n", "solver_reduced.setRelativeToleranceSteadyStateSensi(1e-12)\n", "solver_reduced.setAbsoluteToleranceSteadyStateSensi(1e-16)\n", - "rdata_reduced_strict = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n", + "rdata_reduced_strict = amici.runAmiciSimulation(\n", + " model_reduced, solver_reduced, edata\n", + ")\n", "\n", "# compare ODE outputs\n", - "print('\\nODE solver steps, which were necessary to reach steady state:')\n", - "print('lax tolerances: ', rdata_reduced_lax['preeq_numsteps'])\n", - "print('strict tolerances: ', rdata_reduced_strict['preeq_numsteps'])\n", + "print(\"\\nODE solver steps, which were necessary to reach steady state:\")\n", + "print(\"lax tolerances: \", rdata_reduced_lax[\"preeq_numsteps\"])\n", + "print(\"strict tolerances: \", rdata_reduced_strict[\"preeq_numsteps\"])\n", "\n", - "print('\\nsimulation time corresponding to steady state:')\n", - "print(rdata_reduced_lax['preeq_t'])\n", - "print(rdata_reduced_strict['preeq_t'])\n", + "print(\"\\nsimulation time corresponding to steady state:\")\n", + "print(rdata_reduced_lax[\"preeq_t\"])\n", + "print(rdata_reduced_strict[\"preeq_t\"])\n", "\n", - "print('\\ncomputation time to reach steady state:')\n", - "print(rdata_reduced_lax['preeq_cpu_time'])\n", - "print(rdata_reduced_strict['preeq_cpu_time'])" + "print(\"\\ncomputation time to reach steady state:\")\n", + "print(rdata_reduced_lax[\"preeq_cpu_time\"])\n", + "print(rdata_reduced_strict[\"preeq_cpu_time\"])" ] } ], diff --git a/python/examples/example_errors.ipynb b/python/examples/example_errors.ipynb index 988afb98a3..5e07803d96 100644 --- a/python/examples/example_errors.ipynb +++ b/python/examples/example_errors.ipynb @@ -75,7 +75,9 @@ "outputs": [], "source": [ "petab_problem = benchmark_models_petab.get_problem(\"Fujita_SciSignal2010\")\n", - "amici_model = import_petab_problem(petab_problem, verbose=False, force_compile=False)\n", + "amici_model = import_petab_problem(\n", + " petab_problem, verbose=False, force_compile=False\n", + ")\n", "\n", "np.random.seed(2991)\n", "problem_parameters = dict(\n", @@ -85,13 +87,25 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", - " scaled_parameters=True\n", + " scaled_parameters=True,\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", - "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_SUCCESS', 'AMICI_SUCCESS', 'AMICI_SUCCESS', 'AMICI_TOO_MUCH_WORK', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']" + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", + "assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + "] == [\n", + " \"AMICI_SUCCESS\",\n", + " \"AMICI_SUCCESS\",\n", + " \"AMICI_SUCCESS\",\n", + " \"AMICI_TOO_MUCH_WORK\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + "]" ] }, { @@ -127,14 +141,17 @@ "amici_solver.setMaxSteps(10 * amici_solver.getMaxSteps())\n", "\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", - " 
solver=amici_solver\n", + " solver=amici_solver,\n", ")\n", "\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n", "print(\"Simulations finished successfully.\")\n", "print()\n", @@ -146,15 +163,18 @@ "amici_solver.setRelativeTolerance(50 * amici_solver.getRelativeTolerance())\n", "\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", - " solver=amici_solver\n", + " solver=amici_solver,\n", + ")\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n", - "print(\"Simulations finished successfully.\")\n" + "print(\"Simulations finished successfully.\")" ] }, { @@ -185,13 +205,18 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", - " scaled_parameters=True\n", + " scaled_parameters=True,\n", + ")\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", - "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_TOO_MUCH_WORK']" + "assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + "] == [\"AMICI_TOO_MUCH_WORK\"]" ] }, { @@ -284,14 +309,26 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", - " scaled_parameters=True\n", + " scaled_parameters=True,\n", + ")\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", "\n", - "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_SUCCESS', 'AMICI_ERR_FAILURE', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']" + "assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + "] == [\n", + " \"AMICI_SUCCESS\",\n", + " \"AMICI_ERR_FAILURE\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + "]" ] }, { @@ -352,13 +389,16 @@ "amici_solver.setRelativeTolerance(amici_solver.getRelativeTolerance() / 10)\n", "\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", - " solver=amici_solver\n", + " solver=amici_solver,\n", + ")\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata
in res[RDATAS]])\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n", "print(\"Simulations finished successfully.\")" ] }, { @@ -381,7 +421,9 @@ "outputs": [], "source": [ "petab_problem = benchmark_models_petab.get_problem(\"Weber_BMC2015\")\n", - "amici_model = import_petab_problem(petab_problem, verbose=False, force_compile=False)\n", + "amici_model = import_petab_problem(\n", + " petab_problem, verbose=False, force_compile=False\n", + ")\n", "\n", "np.random.seed(4)\n", "problem_parameters = dict(\n", @@ -391,13 +433,21 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", - " scaled_parameters=True\n", + " scaled_parameters=True,\n", + ")\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", - "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR', 'AMICI_NOT_RUN']" + "assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + "] == [\n", + " \"AMICI_ERROR\",\n", + " \"AMICI_NOT_RUN\",\n", + "]" ] }, { @@ -438,13 +488,16 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", - " solver=amici_solver\n", + " solver=amici_solver,\n", + ")\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])" ] }, @@ -476,13 +529,18 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", - " scaled_parameters=True\n", + " scaled_parameters=True,\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", - "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_FIRST_RHSFUNC_ERR']" + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", + "assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + "] == [\"AMICI_FIRST_RHSFUNC_ERR\"]" ] }, { @@ -571,11 +629,16 @@ "source": [ "# we have to account for the chosen parameter scale\n", "from itertools import starmap\n", - "unscaled_parameter = dict(zip(\n", - " amici_model.getParameterIds(),\n", - " starmap(amici.getUnscaledParameter, zip(edata.parameters, edata.pscale)),\n", - "))\n", - "print(dict((p, unscaled_parameter[p]) for p in ('Kd', 'Kp', 'n_par')))" + "\n", + "unscaled_parameter = dict(\n", + " zip(\n", + " amici_model.getParameterIds(),\n", + " starmap(\n", + " amici.getUnscaledParameter, zip(edata.parameters, edata.pscale)\n", + " ),\n", + " )\n", + ")\n", + "print(dict((p, unscaled_parameter[p]) for p in (\"Kd\", \"Kp\", \"n_par\")))" ] }, { @@ -594,7 +657,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"{x0['Z_state']**unscaled_parameter['n_par'] +
unscaled_parameter['Kd']**unscaled_parameter['n_par']=}\")" + "print(\n", + " f\"{x0['Z_state']**unscaled_parameter['n_par'] + unscaled_parameter['Kd']**unscaled_parameter['n_par']=}\"\n", + ")" ] }, { @@ -631,16 +696,18 @@ "with suppress(KeyError):\n", " del os.environ[\"AMICI_EXPERIMENTAL_SBML_NONCONST_CLS\"]\n", "amici_model = import_petab_problem(\n", - " petab_problem, \n", + " petab_problem,\n", " verbose=False,\n", " force_compile=True,\n", - " model_name=\"Blasi_CellSystems2016_1\"\n", + " model_name=\"Blasi_CellSystems2016_1\",\n", ")\n", "\n", "amici_solver = amici_model.getSolver()\n", "amici_solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n", "amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n", - "amici_model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n", + "amici_model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.newtonOnly\n", + ")\n", "\n", "np.random.seed(2020)\n", "problem_parameters = dict(\n", @@ -650,17 +717,22 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "\n", "# hard to reproduce on GHA\n", - "if os.getenv('GITHUB_ACTIONS') is None:\n", - " assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR']" + "if os.getenv(\"GITHUB_ACTIONS\") is None:\n", + " assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + " ] == [\"AMICI_ERROR\"]" ] }, { @@ -711,16 +783,21 @@ "outputs": [], "source": [ "# use numerical integration\n", - "amici_model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrationOnly)\n", + "amici_model.setSteadyStateSensitivityMode(\n", + " amici.SteadyStateSensitivityMode.integrationOnly\n", + ")\n", "\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", ")\n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])" ] }, @@ -736,7 +813,7 @@ "# this is enabled by the `AMICI_EXPERIMENTAL_SBML_NONCONST_CLS` environment variable\n", "os.environ[\"AMICI_EXPERIMENTAL_SBML_NONCONST_CLS\"] = \"1\"\n", "amici_model = import_petab_problem(\n", - " petab_problem, \n", + " petab_problem,\n", " verbose=False,\n", " # we need a different model name if we import the model again\n", " # we cannot load a model with the same name as an already loaded model\n", @@ -750,13 +827,16 @@ "amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n", "\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", ")\n", - "print(\"Status:\", 
[amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])" ] }, @@ -779,7 +859,7 @@ "source": [ "petab_problem = benchmark_models_petab.get_problem(\"Brannmark_JBC2010\")\n", "amici_model = import_petab_problem(\n", - " petab_problem, \n", + " petab_problem,\n", " verbose=False,\n", ")\n", "\n", @@ -794,18 +874,32 @@ " )\n", ")\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", ")\n", - " \n", - "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "\n", + "print(\n", + " \"Status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "\n", "# hard to reproduce on GHA\n", - "if os.getenv('GITHUB_ACTIONS') is None:\n", - " assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']" + "if os.getenv(\"GITHUB_ACTIONS\") is None:\n", + " assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + " ] == [\n", + " \"AMICI_ERROR\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " ]" ] }, { @@ -858,23 +952,30 @@ "source": [ "# Reduce relative tolerance for integration\n", "amici_solver = amici_model.getSolver()\n", - "amici_solver.setRelativeTolerance(1/100 * amici_solver.getRelativeTolerance())\n", + "amici_solver.setRelativeTolerance(\n", + " 1 / 100 * amici_solver.getRelativeTolerance()\n", + ")\n", "\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", ")\n", - "print(\"status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "print(\n", + " \"status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "\n", "rdata = res[RDATAS][0]\n", - "print(f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\")\n", + "print(\n", + " f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\"\n", + ")\n", "print(f\"{rdata.preeq_numsteps=}\")\n", "\n", "# hard to reproduce on GHA\n", - "if os.getenv('GITHUB_ACTIONS') is None:\n", + "if os.getenv(\"GITHUB_ACTIONS\") is None:\n", " assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])" ] }, @@ -889,25 +990,35 @@ "for log10_relaxation_factor in range(1, 10):\n", " print(f\"Relaxing tolerances by factor {10 ** log10_relaxation_factor}\")\n", " amici_solver = amici_model.getSolver()\n", - " amici_solver.setRelativeToleranceSteadyState(amici_solver.getRelativeToleranceSteadyState() * 10 ** log10_relaxation_factor)\n", - " \n", + " amici_solver.setRelativeToleranceSteadyState(\n", + " amici_solver.getRelativeToleranceSteadyState()\n", + " * 10**log10_relaxation_factor\n", + " )\n", + "\n", " res = 
simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", " )\n", " if all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS]):\n", - " print(f\"-> Succeeded with relative steady state tolerance {amici_solver.getRelativeToleranceSteadyState()}\\n\")\n", + " print(\n", + " f\"-> Succeeded with relative steady state tolerance {amici_solver.getRelativeToleranceSteadyState()}\\n\"\n", + " )\n", " break\n", " else:\n", " print(\"-> Failed.\\n\")\n", "\n", - "print(\"status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "print(\n", + " \"status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "\n", "rdata = res[RDATAS][0]\n", - "print(f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\")\n", + "print(\n", + " f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\"\n", + ")\n", "print(f\"{rdata.preeq_numsteps=}\")\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])" ] @@ -928,26 +1039,42 @@ "outputs": [], "source": [ "# Let's try increasing the number of Newton steps\n", - "# (this is 0 by default, so the Newton solver wasn't used before, \n", + "# (this is 0 by default, so the Newton solver wasn't used before,\n", "# as can be seen from the 0 in `rdata.preeq_numsteps[0]`)\n", "amici_solver = amici_model.getSolver()\n", "amici_solver.setNewtonMaxSteps(10**4)\n", "\n", "res = simulate_petab(\n", - " petab_problem=petab_problem, \n", + " petab_problem=petab_problem,\n", " amici_model=amici_model,\n", " problem_parameters=problem_parameters,\n", " scaled_parameters=True,\n", " solver=amici_solver,\n", ")\n", - "print(\"status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n", + "print(\n", + " \"status:\",\n", + " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", + ")\n", "\n", "rdata = res[RDATAS][0]\n", - "print(f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\")\n", + "print(\n", + " f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\"\n", + ")\n", "print(f\"{rdata.preeq_numsteps=}\")\n", "# hard to reproduce on GHA\n", - "if os.getenv('GITHUB_ACTIONS') is None:\n", - " assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']" + "if os.getenv(\"GITHUB_ACTIONS\") is None:\n", + " assert [\n", + " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n", + " ] == [\n", + " \"AMICI_ERROR\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " \"AMICI_NOT_RUN\",\n", + " ]" ] }, { diff --git a/python/examples/example_jax/ExampleJax.ipynb b/python/examples/example_jax/ExampleJax.ipynb index 9d79674287..efda5b458e 100644 --- a/python/examples/example_jax/ExampleJax.ipynb +++ b/python/examples/example_jax/ExampleJax.ipynb @@ -46,8 +46,9 @@ "outputs": [], "source": [ "import petab\n", + "\n", "model_name = \"Boehm_JProteomeRes2014\"\n", - "yaml_file = 
f'https://raw.githubusercontent.com/Benchmarking-Initiative/Benchmark-Models-PEtab/master/Benchmark-Models/{model_name}/{model_name}.yaml'\n", + "yaml_file = f\"https://raw.githubusercontent.com/Benchmarking-Initiative/Benchmark-Models-PEtab/master/Benchmark-Models/{model_name}/{model_name}.yaml\"\n", "petab_problem = petab.Problem.from_yaml(yaml_file)" ] }, @@ -262,7 +263,10 @@ "outputs": [], "source": [ "from amici.petab_import import import_petab_problem\n", - "amici_model = import_petab_problem(petab_problem, force_compile=True, verbose=False)" + "\n", + "amici_model = import_petab_problem(\n", + " petab_problem, force_compile=True, verbose=False\n", + ")" ] }, { @@ -292,13 +296,15 @@ "source": [ "from amici.petab_objective import simulate_petab\n", "import amici\n", + "\n", "amici_solver = amici_model.getSolver()\n", "amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n", "\n", + "\n", "def amici_hcb_base(parameters: jnp.array):\n", " return simulate_petab(\n", - " petab_problem, \n", - " amici_model, \n", + " petab_problem,\n", + " amici_model,\n", " problem_parameters=dict(zip(petab_problem.x_free_ids, parameters)),\n", " solver=amici_solver,\n", " )" @@ -320,13 +326,14 @@ "outputs": [], "source": [ "def amici_hcb_llh(parameters: jnp.array):\n", - " return amici_hcb_base(parameters)['llh']\n", + " return amici_hcb_base(parameters)[\"llh\"]\n", + "\n", "\n", "def amici_hcb_sllh(parameters: jnp.array):\n", - " sllh = amici_hcb_base(parameters)['sllh']\n", - " return jnp.asarray(tuple(\n", - " sllh[par_id] for par_id in petab_problem.x_free_ids\n", - " ))" + " sllh = amici_hcb_base(parameters)[\"sllh\"]\n", + " return jnp.asarray(\n", + " tuple(sllh[par_id] for par_id in petab_problem.x_free_ids)\n", + " )" ] }, { @@ -348,6 +355,8 @@ "from jax import custom_jvp\n", "\n", "import numpy as np\n", + "\n", + "\n", "@custom_jvp\n", "def jax_objective(parameters: jnp.array):\n", " return hcb.call(\n", @@ -380,7 +389,9 @@ " sllh = hcb.call(\n", " amici_hcb_sllh,\n", " parameters,\n", - " result_shape=jax.ShapeDtypeStruct((petab_problem.parameter_df.estimate.sum(),), np.float64),\n", + " result_shape=jax.ShapeDtypeStruct(\n", + " (petab_problem.parameter_df.estimate.sum(),), np.float64\n", + " ),\n", " )\n", " return llh, sllh.dot(x_dot)" ] @@ -402,18 +413,25 @@ "source": [ "from jax import value_and_grad\n", "\n", - "parameter_scales = petab_problem.parameter_df.loc[petab_problem.x_free_ids, petab.PARAMETER_SCALE].values\n", + "parameter_scales = petab_problem.parameter_df.loc[\n", + " petab_problem.x_free_ids, petab.PARAMETER_SCALE\n", + "].values\n", + "\n", "\n", "@jax.jit\n", "@value_and_grad\n", "def jax_objective_with_parameter_transform(parameters: jnp.array):\n", - " par_scaled = jnp.asarray(tuple(\n", - " value if scale == petab.LIN\n", - " else jnp.exp(value) if scale == petab.LOG\n", - " else jnp.power(10, value)\n", - " for value, scale in zip(parameters, parameter_scales)\n", - " ))\n", - " return jax_objective(par_scaled)\n" + " par_scaled = jnp.asarray(\n", + " tuple(\n", + " value\n", + " if scale == petab.LIN\n", + " else jnp.exp(value)\n", + " if scale == petab.LOG\n", + " else jnp.power(10, value)\n", + " for value, scale in zip(parameters, parameter_scales)\n", + " )\n", + " )\n", + " return jax_objective(par_scaled)" ] }, { @@ -445,7 +463,9 @@ "metadata": {}, "outputs": [], "source": [ - "llh_jax, sllh_jax = jax_objective_with_parameter_transform(scaled_parameters_np)" + "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n", + " 
scaled_parameters_np\n", + ")" ] }, { @@ -464,12 +484,12 @@ "outputs": [], "source": [ "r = simulate_petab(\n", - " petab_problem, \n", - " amici_model, \n", + " petab_problem,\n", + " amici_model,\n", " solver=amici_solver,\n", " scaled_parameters=True,\n", " scaled_gradients=True,\n", - " problem_parameters=scaled_parameters\n", + " problem_parameters=scaled_parameters,\n", ")" ] }, @@ -528,7 +548,15 @@ ], "source": [ "import pandas as pd\n", - "pd.DataFrame(dict(amici=r['llh'], jax=float(llh_jax), rel_diff=(r['llh']-float(llh_jax))/r['llh']), index=('llh',))" + "\n", + "pd.DataFrame(\n", + " dict(\n", + " amici=r[\"llh\"],\n", + " jax=float(llh_jax),\n", + " rel_diff=(r[\"llh\"] - float(llh_jax)) / r[\"llh\"],\n", + " ),\n", + " index=(\"llh\",),\n", + ")" ] }, { @@ -641,10 +669,13 @@ } ], "source": [ - "grad_amici=np.asarray(list(r['sllh'].values()))\n", - "grad_jax=np.asarray(sllh_jax)\n", - "rel_diff=(grad_amici-grad_jax)/grad_jax\n", - "pd.DataFrame(index=r['sllh'].keys(), data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff))" + "grad_amici = np.asarray(list(r[\"sllh\"].values()))\n", + "grad_jax = np.asarray(sllh_jax)\n", + "rel_diff = (grad_amici - grad_jax) / grad_jax\n", + "pd.DataFrame(\n", + " index=r[\"sllh\"].keys(),\n", + " data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff),\n", + ")" ] }, { @@ -664,7 +695,9 @@ "outputs": [], "source": [ "jax.config.update(\"jax_enable_x64\", True)\n", - "llh_jax, sllh_jax = jax_objective_with_parameter_transform(scaled_parameters_np)" + "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n", + " scaled_parameters_np\n", + ")" ] }, { @@ -729,7 +762,14 @@ } ], "source": [ - "pd.DataFrame(dict(amici=r['llh'], jax=float(llh_jax), rel_diff=(r['llh']-float(llh_jax))/r['llh']), index=('llh',))" + "pd.DataFrame(\n", + " dict(\n", + " amici=r[\"llh\"],\n", + " jax=float(llh_jax),\n", + " rel_diff=(r[\"llh\"] - float(llh_jax)) / r[\"llh\"],\n", + " ),\n", + " index=(\"llh\",),\n", + ")" ] }, { @@ -842,10 +882,13 @@ } ], "source": [ - "grad_amici=np.asarray(list(r['sllh'].values()))\n", - "grad_jax=np.asarray(sllh_jax)\n", - "rel_diff=(grad_amici-grad_jax)/grad_jax\n", - "pd.DataFrame(index=r['sllh'].keys(), data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff))" + "grad_amici = np.asarray(list(r[\"sllh\"].values()))\n", + "grad_jax = np.asarray(sllh_jax)\n", + "rel_diff = (grad_amici - grad_jax) / grad_jax\n", + "pd.DataFrame(\n", + " index=r[\"sllh\"].keys(),\n", + " data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff),\n", + ")" ] } ], diff --git a/python/examples/example_large_models/example_performance_optimization.ipynb b/python/examples/example_large_models/example_performance_optimization.ipynb index e3dd72b6cd..31a9fc1729 100644 --- a/python/examples/example_large_models/example_performance_optimization.ipynb +++ b/python/examples/example_large_models/example_performance_optimization.ipynb @@ -39,7 +39,7 @@ "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "\n", - "plt.rcParams.update({'font.size': 12})" + "plt.rcParams.update({\"font.size\": 12})" ] }, { @@ -122,7 +122,9 @@ "plt.bar([\"True\", \"False\"], [873.54, 697.85])\n", "plt.xlabel(\"generate_sensitivity_code\")\n", "plt.ylabel(\"Import time (s)\")\n", - "plt.title(\"Import speed-up when not generating sensitivity code\\n(Froehlich_CellSystems2018)\");\n", + "plt.title(\n", + " \"Import speed-up when not generating sensitivity code\\n(Froehlich_CellSystems2018)\"\n", + ")\n", "plt.show()\n", "\n", "print(f\"speedup: 
{873.54/697.85:.2f}x\")" @@ -217,9 +219,11 @@ " plt.ylabel(\"Import time (s)\")\n", " plt.ylim(ymin=0)\n", " plt.show()\n", - " \n", + "\n", " import_times = df.sort_values(\"nprocs\")[\"time\"].values\n", - " percent_change = (import_times[0] - min(import_times[1:])) / import_times[0] * 100\n", + " percent_change = (\n", + " (import_times[0] - min(import_times[1:])) / import_times[0] * 100\n", + " )\n", " if percent_change > 0:\n", " print(f\"Import time decreased by up to ~{percent_change:.0f}%.\")\n", " else:\n", @@ -281,14 +285,19 @@ "source": [ "figsize(8, 4)\n", "compilation_time_s = [3022.453, 289.518]\n", - "labels = [\"g++ (Ubuntu 12.2.0-3ubuntu1) 12.2.0\", \"Ubuntu clang version 15.0.2-1\"]\n", + "labels = [\n", + " \"g++ (Ubuntu 12.2.0-3ubuntu1) 12.2.0\",\n", + " \"Ubuntu clang version 15.0.2-1\",\n", + "]\n", "plt.bar(labels, compilation_time_s)\n", "plt.ylim(ymin=0)\n", "plt.title(\"Choice of compiler - FröhlichGer2022\")\n", "plt.xlabel(\"Compiler\")\n", - "plt.ylabel(\"Walltime for compilation (s)\");\n", + "plt.ylabel(\"Walltime for compilation (s)\")\n", "plt.show()\n", - "print(f\"Clang was ~{compilation_time_s[0] / compilation_time_s[1]:.0f}x as fast as g++.\")" + "print(\n", + " f\"Clang was ~{compilation_time_s[0] / compilation_time_s[1]:.0f}x as fast as g++.\"\n", + ")" ] }, { @@ -360,10 +369,12 @@ " plt.ylabel(\"Compile time (s)\")\n", " plt.ylim(ymin=0)\n", " plt.show()\n", - " \n", + "\n", " compilation_time_s = df.sort_values(\"nprocs\")[\"time\"].values\n", - " print(\"We were able to reduce compile time by up to \"\n", - " f\"~{(compilation_time_s[0] - min(compilation_time_s[1:])) / compilation_time_s[0] * 100:.0f}%.\")" + " print(\n", + " \"We were able to reduce compile time by up to \"\n", + " f\"~{(compilation_time_s[0] - min(compilation_time_s[1:])) / compilation_time_s[0] * 100:.0f}%.\"\n", + " )" ] }, { diff --git a/python/examples/example_petab/petab.ipynb b/python/examples/example_petab/petab.ipynb index 27ee96e449..689d793f56 100644 --- a/python/examples/example_petab/petab.ipynb +++ b/python/examples/example_petab/petab.ipynb @@ -39,10 +39,10 @@ "output_type": "stream", "text": [ "Cloning into 'tmp/benchmark-models'...\n", - "remote: Enumerating objects: 142, done.\u001B[K\n", - "remote: Counting objects: 100% (142/142), done.\u001B[K\n", - "remote: Compressing objects: 100% (122/122), done.\u001B[K\n", - "remote: Total 142 (delta 41), reused 104 (delta 18), pack-reused 0\u001B[K\n", + "remote: Enumerating objects: 142, done.\u001b[K\n", + "remote: Counting objects: 100% (142/142), done.\u001b[K\n", + "remote: Compressing objects: 100% (122/122), done.\u001b[K\n", + "remote: Total 142 (delta 41), reused 104 (delta 18), pack-reused 0\u001b[K\n", "Receiving objects: 100% (142/142), 648.29 KiB | 1.23 MiB/s, done.\n", "Resolving deltas: 100% (41/41), done.\n" ] @@ -335,10 +335,15 @@ ], "source": [ "parameters = {\n", - " x_id: x_val for x_id, x_val in\n", - " zip(petab_problem.x_ids, petab_problem.x_nominal_scaled)\n", + " x_id: x_val\n", + " for x_id, x_val in zip(petab_problem.x_ids, petab_problem.x_nominal_scaled)\n", "}\n", - "simulate_petab(petab_problem, amici_model, problem_parameters=parameters, scaled_parameters=True)" + "simulate_petab(\n", + " petab_problem,\n", + " amici_model,\n", + " problem_parameters=parameters,\n", + " scaled_parameters=True,\n", + ")" ] }, { diff --git a/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb b/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb index 
07da70c02f..63fbc7a4ff 100644 --- a/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb +++ b/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb @@ -15,9 +15,9 @@ "outputs": [], "source": [ "# SBML model we want to import\n", - "sbml_file = 'model_presimulation.xml'\n", + "sbml_file = \"model_presimulation.xml\"\n", "# Name of the model that will also be the name of the python module\n", - "model_name = 'model_presimulation'\n", + "model_name = \"model_presimulation\"\n", "# Directory to which the generated model code is written\n", "model_output_dir = model_name\n", "\n", @@ -86,22 +86,45 @@ "sbml_doc = sbml_reader.readSBML(sbml_file)\n", "sbml_model = sbml_doc.getModel()\n", "\n", - "print('Species:')\n", - "pprint([(s.getId(),s.getName()) for s in sbml_model.getListOfSpecies()])\n", + "print(\"Species:\")\n", + "pprint([(s.getId(), s.getName()) for s in sbml_model.getListOfSpecies()])\n", "\n", - "print('\\nReactions:')\n", + "print(\"\\nReactions:\")\n", "for reaction in sbml_model.getListOfReactions():\n", - " reactants = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfReactants()])\n", - " products = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfProducts()])\n", - " reversible = '<' if reaction.getReversible() else ''\n", - " print('%3s: %10s %1s->%10s\\t\\t[%s]' % (reaction.getName(), \n", - " reactants,\n", - " reversible,\n", - " products,\n", - " libsbml.formulaToL3String(reaction.getKineticLaw().getMath())))\n", - " \n", - "print('Parameters:')\n", - "pprint([(p.getId(),p.getName()) for p in sbml_model.getListOfParameters()])" + " reactants = \" + \".join(\n", + " [\n", + " \"%s %s\"\n", + " % (\n", + " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n", + " r.getSpecies(),\n", + " )\n", + " for r in reaction.getListOfReactants()\n", + " ]\n", + " )\n", + " products = \" + \".join(\n", + " [\n", + " \"%s %s\"\n", + " % (\n", + " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n", + " r.getSpecies(),\n", + " )\n", + " for r in reaction.getListOfProducts()\n", + " ]\n", + " )\n", + " reversible = \"<\" if reaction.getReversible() else \"\"\n", + " print(\n", + " \"%3s: %10s %1s->%10s\\t\\t[%s]\"\n", + " % (\n", + " reaction.getName(),\n", + " reactants,\n", + " reversible,\n", + " products,\n", + " libsbml.formulaToL3String(reaction.getKineticLaw().getMath()),\n", + " )\n", + " )\n", + "\n", + "print(\"Parameters:\")\n", + "pprint([(p.getId(), p.getName()) for p in sbml_model.getListOfParameters()])" ] }, { @@ -152,7 +175,12 @@ ], "source": [ "from IPython.display import IFrame\n", - "IFrame('https://amici.readthedocs.io/en/latest/glossary.html#term-fixed-parameters', width=600, height=175)" + "\n", + "IFrame(\n", + " \"https://amici.readthedocs.io/en/latest/glossary.html#term-fixed-parameters\",\n", + " width=600,\n", + " height=175,\n", + ")" ] }, { @@ -161,7 +189,7 @@ "metadata": {}, "outputs": [], "source": [ - "fixedParameters = ['DRUG_0','KIN_0']" + "fixedParameters = [\"DRUG_0\", \"KIN_0\"]" ] }, { @@ -190,10 +218,10 @@ "source": [ "# Retrieve model output names and formulae from AssignmentRules and remove the respective rules\n", "observables = amici.assignmentRules2observables(\n", - " sbml_importer.sbml, # the libsbml model object\n", - " filter_function=lambda variable: variable.getName() == 'pPROT' \n", - " )\n", - 
"print('Observables:')\n", + " sbml_importer.sbml, # the libsbml model object\n", + " filter_function=lambda variable: variable.getName() == \"pPROT\",\n", + ")\n", + "print(\"Observables:\")\n", "pprint(observables)" ] }, @@ -210,11 +238,13 @@ "metadata": {}, "outputs": [], "source": [ - "sbml_importer.sbml2amici(model_name, \n", - " model_output_dir, \n", - " verbose=False,\n", - " observables=observables,\n", - " constant_parameters=fixedParameters)\n", + "sbml_importer.sbml2amici(\n", + " model_name,\n", + " model_output_dir,\n", + " verbose=False,\n", + " observables=observables,\n", + " constant_parameters=fixedParameters,\n", + ")\n", "# load the generated module\n", "model_module = amici.import_model_module(model_name, model_output_dir)" ] @@ -266,7 +296,7 @@ ], "source": [ "# Run simulation using default model parameters and solver options\n", - "model.setTimepoints(np.linspace(0, 60, 60)) \n", + "model.setTimepoints(np.linspace(0, 60, 60))\n", "rdata = amici.runAmiciSimulation(model, solver)\n", "amici.plotting.plotObservableTrajectories(rdata)" ] @@ -298,7 +328,7 @@ ], "source": [ "edata = amici.ExpData(rdata, 0.1, 0.0)\n", - "edata.fixedParameters = [0,2]\n", + "edata.fixedParameters = [0, 2]\n", "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "amici.plotting.plotObservableTrajectories(rdata)" ] @@ -330,7 +360,7 @@ } ], "source": [ - "edata.fixedParametersPreequilibration = [3,0]\n", + "edata.fixedParametersPreequilibration = [3, 0]\n", "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "amici.plotting.plotObservableTrajectories(rdata)" ] diff --git a/python/examples/example_presimulation/createModelPresimulation.py b/python/examples/example_presimulation/createModelPresimulation.py index 4806b67647..1db454a6b5 100644 --- a/python/examples/example_presimulation/createModelPresimulation.py +++ b/python/examples/example_presimulation/createModelPresimulation.py @@ -1,7 +1,15 @@ import os import pysb.export -from pysb.core import Expression, Initial, Model, Monomer, Observable, Parameter, Rule +from pysb.core import ( + Expression, + Initial, + Model, + Monomer, + Observable, + Parameter, + Rule, +) model = Model() @@ -41,7 +49,8 @@ Rule( "PROT_dephospho", - prot(phospho="p", drug=None, kin=None) >> prot(phospho="u", drug=None, kin=None), + prot(phospho="p", drug=None, kin=None) + >> prot(phospho="u", drug=None, kin=None), Parameter("kdephospho_prot", 0.1), ) diff --git a/python/examples/example_splines/ExampleSplines.ipynb b/python/examples/example_splines/ExampleSplines.ipynb index 593c84e3b9..d376ba91e5 100644 --- a/python/examples/example_splines/ExampleSplines.ipynb +++ b/python/examples/example_splines/ExampleSplines.ipynb @@ -40,36 +40,61 @@ "from matplotlib import pyplot as plt\n", "\n", "# Choose build directory\n", - "BUILD_PATH = None # temporary folder\n", + "BUILD_PATH = None # temporary folder\n", "# BUILD_PATH = 'build' # specified folder for debugging\n", "if BUILD_PATH is not None:\n", " # Remove previous models\n", " rmtree(BUILD_PATH, ignore_errors=True)\n", " os.mkdir(BUILD_PATH)\n", - " \n", + "\n", + "\n", "def simulate(sbml_model, parameters=None, *, model_name=None, **kwargs):\n", " if model_name is None:\n", - " model_name = 'model_' + uuid1().hex\n", + " model_name = \"model_\" + uuid1().hex\n", " if BUILD_PATH is None:\n", " with TemporaryDirectory() as build_dir:\n", - " return _simulate(sbml_model, parameters, build_dir=build_dir, model_name=model_name, **kwargs)\n", + " return _simulate(\n", + " sbml_model,\n", + " parameters,\n", 
+ " build_dir=build_dir,\n", + " model_name=model_name,\n", + " **kwargs\n", + " )\n", " else:\n", " build_dir = os.path.join(BUILD_PATH, model_name)\n", " rmtree(build_dir, ignore_errors=True)\n", - " return _simulate(sbml_model, parameters, build_dir=build_dir, model_name=model_name, **kwargs)\n", + " return _simulate(\n", + " sbml_model,\n", + " parameters,\n", + " build_dir=build_dir,\n", + " model_name=model_name,\n", + " **kwargs\n", + " )\n", "\n", - "def _simulate(sbml_model, parameters, *, build_dir, model_name, T=1, discard_annotations=False, plot=True):\n", + "\n", + "def _simulate(\n", + " sbml_model,\n", + " parameters,\n", + " *,\n", + " build_dir,\n", + " model_name,\n", + " T=1,\n", + " discard_annotations=False,\n", + " plot=True\n", + "):\n", " if parameters is None:\n", " parameters = {}\n", " # Build the model module from the SBML file\n", - " sbml_importer = amici.SbmlImporter(sbml_model, discard_annotations=discard_annotations)\n", + " sbml_importer = amici.SbmlImporter(\n", + " sbml_model, discard_annotations=discard_annotations\n", + " )\n", " sbml_importer.sbml2amici(model_name, build_dir)\n", " # Import the model module\n", " sys.path.insert(0, os.path.abspath(build_dir))\n", " model_module = import_module(model_name)\n", " # Setup simulation timepoints and parameters\n", " model = model_module.getModel()\n", - " for (name, value) in parameters.items():\n", + " for name, value in parameters.items():\n", " model.setParameterByName(name, value)\n", " if isinstance(T, (int, float)):\n", " T = np.linspace(0, T, 100)\n", @@ -82,7 +107,7 @@ " # Plot results\n", " if plot:\n", " fig, ax = plt.subplots()\n", - " ax.plot(rdata['t'], rdata['x'])\n", + " ax.plot(rdata[\"t\"], rdata[\"x\"])\n", " ax.set_xlabel(\"time\")\n", " ax.set_ylabel(\"concentration\")\n", " return model, rdata" @@ -161,7 +186,7 @@ } ], "source": [ - "simulate('example_splines.xml', dict(f=1));" + "simulate(\"example_splines.xml\", dict(f=1));" ] }, { @@ -204,8 +229,8 @@ "source": [ "# Create a spline object\n", "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", - " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline function is evaluated at the current time point\n", + " sbml_id=\"f\",\n", + " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline function is evaluated at the current time point\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n", " values_at_nodes=[1, -1, 2],\n", ")" @@ -256,7 +281,7 @@ ], "source": [ "# Plot the spline\n", - "spline.plot(xlabel='time');" + "spline.plot(xlabel=\"time\");" ] }, { @@ -269,7 +294,8 @@ "source": [ "# Load SBML model using libsbml\n", "import libsbml\n", - "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n", + "\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n", "sbml_model = sbml_doc.getModel()\n", "# We can add the spline assignment rule to the SBML model\n", "spline.add_to_sbml_model(sbml_model)" @@ -307,7 +333,7 @@ "outputs": [], "source": [ "# Final value should be equal to the integral computed above\n", - "assert np.allclose(rdata['x'][-1], float(spline.integrate(0.0, 1.0)))" + "assert np.allclose(rdata[\"x\"][-1], float(spline.integrate(0.0, 1.0)))" ] }, { @@ -466,10 +492,10 @@ "outputs": [], "source": [ "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n", - " values_at_nodes=sp.symbols('f0:3'),\n", + " 
values_at_nodes=sp.symbols(\"f0:3\"),\n", ")" ] }, @@ -481,7 +507,7 @@ }, "outputs": [], "source": [ - "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n", "sbml_model = sbml_doc.getModel()\n", "spline.add_to_sbml_model(\n", " sbml_model,\n", @@ -520,7 +546,7 @@ } ], "source": [ - "spline.plot(parameters, xlabel='time');" + "spline.plot(parameters, xlabel=\"time\");" ] }, { @@ -566,9 +592,9 @@ "source": [ "# Sensitivities with respect to the spline values can be computed\n", "fig, ax = plt.subplots()\n", - "ax.plot(rdata['t'], rdata.sx[:, 0], label=model.getParameterNames()[0])\n", - "ax.plot(rdata['t'], rdata.sx[:, 1], label=model.getParameterNames()[1])\n", - "ax.plot(rdata['t'], rdata.sx[:, 2], label=model.getParameterNames()[2])\n", + "ax.plot(rdata[\"t\"], rdata.sx[:, 0], label=model.getParameterNames()[0])\n", + "ax.plot(rdata[\"t\"], rdata.sx[:, 1], label=model.getParameterNames()[1])\n", + "ax.plot(rdata[\"t\"], rdata.sx[:, 2], label=model.getParameterNames()[2])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"sensitivity\")\n", "ax.legend();" @@ -600,7 +626,7 @@ "source": [ "# A simple spline for which finite differencing would give a different result\n", "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n", " values_at_nodes=[1.0, -1.0, 1.0],\n", @@ -627,7 +653,7 @@ } ], "source": [ - "spline.plot(xlabel='time');" + "spline.plot(xlabel=\"time\");" ] }, { @@ -650,7 +676,7 @@ ], "source": [ "# Simulation\n", - "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n", "sbml_model = sbml_doc.getModel()\n", "spline.add_to_sbml_model(sbml_model)\n", "simulate(sbml_model, T=1);" @@ -698,11 +724,14 @@ "outputs": [], "source": [ "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n", " values_at_nodes=[-2, 1, -1],\n", - " extrapolate=(None, 'constant'), # no extrapolation required on the left side\n", + " extrapolate=(\n", + " None,\n", + " \"constant\",\n", + " ), # no extrapolation required on the left side\n", ")" ] }, @@ -725,7 +754,7 @@ } ], "source": [ - "spline.plot(xlabel='time', xlim=(0, 1.5));" + "spline.plot(xlabel=\"time\", xlim=(0, 1.5));" ] }, { @@ -747,7 +776,7 @@ } ], "source": [ - "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n", "sbml_model = sbml_doc.getModel()\n", "spline.add_to_sbml_model(sbml_model)\n", "simulate(sbml_model, T=1.5);" @@ -790,11 +819,11 @@ "outputs": [], "source": [ "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n", - " values_at_nodes=[-2, 1, -2], # first and last node must coincide\n", - " extrapolate='periodic',\n", + " values_at_nodes=[-2, 1, -2], # first and last node must coincide\n", + " extrapolate=\"periodic\",\n", ")" ] }, @@ -817,7 +846,7 @@ } ], "source": [ - "spline.plot(xlabel='time', xlim=(0, 3));" + "spline.plot(xlabel=\"time\", xlim=(0, 3));" ] }, { @@ -839,7 +868,7 @@ } ], "source": [ - "sbml_doc = 
libsbml.SBMLReader().readSBML('example_splines.xml')\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n", "sbml_model = sbml_doc.getModel()\n", "spline.add_to_sbml_model(sbml_model)\n", "simulate(sbml_model, T=3);" @@ -882,11 +911,11 @@ "outputs": [], "source": [ "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=4),\n", " values_at_nodes=[-1, 2, 4, 2],\n", - " bc='zeroderivative',\n", + " bc=\"zeroderivative\",\n", ")" ] }, @@ -909,7 +938,7 @@ } ], "source": [ - "spline.plot(xlabel='time');" + "spline.plot(xlabel=\"time\");" ] }, { @@ -941,11 +970,11 @@ "outputs": [], "source": [ "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=4),\n", " values_at_nodes=[-1, 2, 4, 2],\n", - " bc='natural',\n", + " bc=\"natural\",\n", ")" ] }, @@ -968,7 +997,7 @@ } ], "source": [ - "spline.plot(xlabel='time');" + "spline.plot(xlabel=\"time\");" ] }, { @@ -1007,7 +1036,7 @@ "outputs": [], "source": [ "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=5),\n", " values_at_nodes=[2, 0.05, 0.1, 2, 1],\n", @@ -1034,7 +1063,7 @@ ], "source": [ "# This spline assumes negative values!\n", - "spline.plot(xlabel='time');" + "spline.plot(xlabel=\"time\");" ] }, { @@ -1046,7 +1075,7 @@ "outputs": [], "source": [ "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=5),\n", " values_at_nodes=[2, 0.05, 0.1, 2, 1],\n", @@ -1075,7 +1104,7 @@ "source": [ "# Instead of under-shooting we now have over-shooting,\n", "# but at least the \"spline\" is always positive\n", - "spline.plot(xlabel='time');" + "spline.plot(xlabel=\"time\");" ] }, { @@ -1121,9 +1150,27 @@ }, "outputs": [], "source": [ - "nruns = 6 # number of replicates\n", - "num_nodes = [5, 10, 15, 20, 25, 30, 40] # benchmark model import for these node numbers\n", - "amici_only_nodes = [50, 75, 100, 125, 150, 175, 200, 225, 250] # for these node numbers, only benchmark the annotation-based implementation" + "nruns = 6 # number of replicates\n", + "num_nodes = [\n", + " 5,\n", + " 10,\n", + " 15,\n", + " 20,\n", + " 25,\n", + " 30,\n", + " 40,\n", + "] # benchmark model import for these node numbers\n", + "amici_only_nodes = [\n", + " 50,\n", + " 75,\n", + " 100,\n", + " 125,\n", + " 150,\n", + " 175,\n", + " 200,\n", + " 225,\n", + " 250,\n", + "] # for these node numbers, only benchmark the annotation-based implementation" ] }, { @@ -1133,7 +1180,7 @@ "outputs": [], "source": [ "# If running as a Github action, just do the minimal amount of work required to check whether the code is working\n", - "if os.getenv('GITHUB_ACTIONS') is not None:\n", + "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n", " nruns = 1\n", " num_nodes = [4]\n", " amici_only_nodes = [5]" @@ -1151,12 +1198,12 @@ "for n in num_nodes + amici_only_nodes:\n", " # Create model\n", " spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='f',\n", + " sbml_id=\"f\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=amici.splines.UniformGrid(0, 1, 
number_of_nodes=n),\n", " values_at_nodes=np.random.rand(n),\n", " )\n", - " sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n", + " sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n", " sbml_model = sbml_doc.getModel()\n", " spline.add_to_sbml_model(sbml_model)\n", " # Benchmark model creation\n", @@ -1165,22 +1212,34 @@ " for _ in range(nruns):\n", " with tempfile.TemporaryDirectory() as tmpdir:\n", " t0 = time.perf_counter_ns()\n", - " amici.SbmlImporter(sbml_model).sbml2amici('benchmark', tmpdir)\n", + " amici.SbmlImporter(sbml_model).sbml2amici(\"benchmark\", tmpdir)\n", " dt = time.perf_counter_ns() - t0\n", " timings_amici.append(dt / 1e9)\n", " if n in num_nodes:\n", " with tempfile.TemporaryDirectory() as tmpdir:\n", " t0 = time.perf_counter_ns()\n", - " amici.SbmlImporter(sbml_model, discard_annotations=True).sbml2amici('benchmark', tmpdir)\n", + " amici.SbmlImporter(\n", + " sbml_model, discard_annotations=True\n", + " ).sbml2amici(\"benchmark\", tmpdir)\n", " dt = time.perf_counter_ns() - t0\n", " timings_piecewise.append(dt / 1e9)\n", " # Append benchmark data to dataframe\n", - " df_amici = pd.DataFrame(dict(num_nodes=n, time=timings_amici, use_annotations=True))\n", - " df_piecewise = pd.DataFrame(dict(num_nodes=n, time=timings_piecewise, use_annotations=False))\n", + " df_amici = pd.DataFrame(\n", + " dict(num_nodes=n, time=timings_amici, use_annotations=True)\n", + " )\n", + " df_piecewise = pd.DataFrame(\n", + " dict(num_nodes=n, time=timings_piecewise, use_annotations=False)\n", + " )\n", " if df is None:\n", - " df = pd.concat([df_amici, df_piecewise], ignore_index=True, verify_integrity=True)\n", + " df = pd.concat(\n", + " [df_amici, df_piecewise], ignore_index=True, verify_integrity=True\n", + " )\n", " else:\n", - " df = pd.concat([df, df_amici, df_piecewise], ignore_index=True, verify_integrity=True)" + " df = pd.concat(\n", + " [df, df_amici, df_piecewise],\n", + " ignore_index=True,\n", + " verify_integrity=True,\n", + " )" ] }, { @@ -1203,19 +1262,62 @@ ], "source": [ "kwargs = dict(markersize=7.5)\n", - "df_avg = df.groupby(['use_annotations', 'num_nodes']).mean().reset_index()\n", + "df_avg = df.groupby([\"use_annotations\", \"num_nodes\"]).mean().reset_index()\n", "fig, ax = plt.subplots(1, 1, figsize=(6.5, 3.5))\n", - "ax.plot(df_avg[np.logical_not(df_avg['use_annotations'])]['num_nodes'], df_avg[np.logical_not(df_avg['use_annotations'])]['time'], '.', label='MathML piecewise', **kwargs)\n", - "ax.plot(df_avg[df_avg['use_annotations']]['num_nodes'], df_avg[df_avg['use_annotations']]['time'], '.', label='AMICI annotations', **kwargs)\n", - "ax.set_ylabel('model import time (s)')\n", - "ax.set_xlabel('number of spline nodes')\n", - "ax.set_yscale('log')\n", - "ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, pos: f\"{x:.0f}\"))\n", - "ax.xaxis.set_ticks([10, 20, 30, 40, 60, 70, 80, 90, 110, 120, 130, 140, 160, 170, 180, 190, 210, 220, 230, 240, 260], minor=True)\n", - "ax.yaxis.set_ticks([20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400], ['20', '30', '40', '50', None, None, None, None, '200', '300', '400'], minor=True)\n", + "ax.plot(\n", + " df_avg[np.logical_not(df_avg[\"use_annotations\"])][\"num_nodes\"],\n", + " df_avg[np.logical_not(df_avg[\"use_annotations\"])][\"time\"],\n", + " \".\",\n", + " label=\"MathML piecewise\",\n", + " **kwargs,\n", + ")\n", + "ax.plot(\n", + " df_avg[df_avg[\"use_annotations\"]][\"num_nodes\"],\n", + " df_avg[df_avg[\"use_annotations\"]][\"time\"],\n", + " \".\",\n", + " 
label=\"AMICI annotations\",\n", + " **kwargs,\n", + ")\n", + "ax.set_ylabel(\"model import time (s)\")\n", + "ax.set_xlabel(\"number of spline nodes\")\n", + "ax.set_yscale(\"log\")\n", + "ax.yaxis.set_major_formatter(\n", + " mpl.ticker.FuncFormatter(lambda x, pos: f\"{x:.0f}\")\n", + ")\n", + "ax.xaxis.set_ticks(\n", + " [\n", + " 10,\n", + " 20,\n", + " 30,\n", + " 40,\n", + " 60,\n", + " 70,\n", + " 80,\n", + " 90,\n", + " 110,\n", + " 120,\n", + " 130,\n", + " 140,\n", + " 160,\n", + " 170,\n", + " 180,\n", + " 190,\n", + " 210,\n", + " 220,\n", + " 230,\n", + " 240,\n", + " 260,\n", + " ],\n", + " minor=True,\n", + ")\n", + "ax.yaxis.set_ticks(\n", + " [20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400],\n", + " [\"20\", \"30\", \"40\", \"50\", None, None, None, None, \"200\", \"300\", \"400\"],\n", + " minor=True,\n", + ")\n", "ax.legend()\n", "ax.figure.tight_layout()\n", - "#ax.figure.savefig('benchmark_import.pdf')" + "# ax.figure.savefig('benchmark_import.pdf')" ] } ], diff --git a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb index 8846974330..8e3ee6db10 100644 --- a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb +++ b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb @@ -104,9 +104,11 @@ "outputs": [], "source": [ "# If running as a Github action, just do the minimal amount of work required to check whether the code is working\n", - "if os.getenv('GITHUB_ACTIONS') is not None:\n", + "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n", " n_starts = 15\n", - " pypesto_optimizer = pypesto.optimize.FidesOptimizer(verbose=logging.WARNING, options=dict(maxiter=10))\n", + " pypesto_optimizer = pypesto.optimize.FidesOptimizer(\n", + " verbose=logging.WARNING, options=dict(maxiter=10)\n", + " )\n", " pypesto_engine = pypesto.engine.SingleCoreEngine()" ] }, @@ -175,14 +177,16 @@ "source": [ "# Create spline for pEpoR\n", "nodes = [0, 5, 10, 20, 60]\n", - "values_at_nodes = [sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes] # new parameter symbols for spline values\n", + "values_at_nodes = [\n", + " sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes\n", + "] # new parameter symbols for spline values\n", "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='pEpoR', # matches name of species in SBML model\n", - " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline is evaluated at the current time\n", + " sbml_id=\"pEpoR\", # matches name of species in SBML model\n", + " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline is evaluated at the current time\n", " nodes=nodes,\n", - " values_at_nodes=values_at_nodes, # values at the nodes (in linear scale)\n", - " extrapolate=(None, \"constant\"), # because steady state is reached\n", - " bc=\"auto\", # automatically determined from extrapolate (bc at right end will be 'zero_derivative')\n", + " values_at_nodes=values_at_nodes, # values at the nodes (in linear scale)\n", + " extrapolate=(None, \"constant\"), # because steady state is reached\n", + " bc=\"auto\", # automatically determined from extrapolate (bc at right end will be 'zero_derivative')\n", " logarithmic_parametrization=True,\n", ")" ] @@ -209,9 +213,13 @@ "outputs": [], "source": [ "# Add spline formula to SBML model\n", - "sbml_doc = libsbml.SBMLReader().readSBML(os.path.join('Swameye_PNAS2003', 'swameye2003_model.xml'))\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\n", + " 
os.path.join(\"Swameye_PNAS2003\", \"swameye2003_model.xml\")\n", + ")\n", "sbml_model = sbml_doc.getModel()\n", - "spline.add_to_sbml_model(sbml_model, auto_add=True, y_nominal=0.1, y_constant=True)" + "spline.add_to_sbml_model(\n", + " sbml_model, auto_add=True, y_nominal=0.1, y_constant=True\n", + ")" ] }, { @@ -239,7 +247,13 @@ "source": [ "# Extra parameters associated to the spline\n", "spline_parameters_df = pd.DataFrame(\n", - " dict(parameterScale='log', lowerBound=0.001, upperBound=10, nominalValue=0.1, estimate=1),\n", + " dict(\n", + " parameterScale=\"log\",\n", + " lowerBound=0.001,\n", + " upperBound=10,\n", + " nominalValue=0.1,\n", + " estimate=1,\n", + " ),\n", " index=pd.Series(list(map(str, values_at_nodes)), name=\"parameterId\"),\n", ")" ] @@ -256,13 +270,22 @@ "# Create PEtab problem\n", "petab_problem = petab.Problem(\n", " sbml_model,\n", - " condition_df=petab.conditions.get_condition_df(os.path.join('Swameye_PNAS2003', 'swameye2003_conditions.tsv')),\n", - " measurement_df=petab.measurements.get_measurement_df(os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv')),\n", + " condition_df=petab.conditions.get_condition_df(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_conditions.tsv\")\n", + " ),\n", + " measurement_df=petab.measurements.get_measurement_df(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\")\n", + " ),\n", " parameter_df=petab.core.concat_tables(\n", - " [os.path.join('Swameye_PNAS2003', 'swameye2003_parameters.tsv'), spline_parameters_df],\n", - " petab.parameters.get_parameter_df\n", + " [\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_parameters.tsv\"),\n", + " spline_parameters_df,\n", + " ],\n", + " petab.parameters.get_parameter_df,\n", + " ),\n", + " observable_df=petab.observables.get_observable_df(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_observables.tsv\")\n", " ),\n", - " observable_df=petab.observables.get_observable_df(os.path.join('Swameye_PNAS2003', 'swameye2003_observables.tsv')),\n", ")" ] }, @@ -351,7 +374,9 @@ "outputs": [], "source": [ "# Import PEtab problem into pyPESTO\n", - "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()\n", + "pypesto_problem = pypesto.petab.PetabImporter(\n", + " petab_problem, model_name=name\n", + ").create_problem()\n", "\n", "# Increase maximum number of steps for AMICI\n", "pypesto_problem.objective.amici_solver.setMaxSteps(10**5)" @@ -378,8 +403,10 @@ "outputs": [], "source": [ "# Load existing results if available\n", - "if os.path.exists(f'{name}.h5'):\n", - " pypesto_result = pypesto.store.read_result(f'{name}.h5', problem=pypesto_problem)\n", + "if os.path.exists(f\"{name}.h5\"):\n", + " pypesto_result = pypesto.store.read_result(\n", + " f\"{name}.h5\", problem=pypesto_problem\n", + " )\n", "else:\n", " pypesto_result = None\n", "# Overwrite\n", @@ -401,7 +428,7 @@ " new_ids = [str(i) for i in range(n_starts)]\n", " else:\n", " last_id = max(int(i) for i in pypesto_result.optimize_result.id)\n", - " new_ids = [str(i) for i in range(last_id+1, last_id+n_starts+1)]\n", + " new_ids = [str(i) for i in range(last_id + 1, last_id + n_starts + 1)]\n", " pypesto_result = pypesto.optimize.minimize(\n", " pypesto_problem,\n", " n_starts=n_starts,\n", @@ -412,7 +439,9 @@ " )\n", " pypesto_result.optimize_result.sort()\n", " if pypesto_result.optimize_result.x[0] is None:\n", - " raise Exception(\"All multistarts failed (n_starts is probably too small)! 
If this error occurred during CI, just run the workflow again.\")" + " raise Exception(\n", + " \"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\"\n", + " )" ] }, { @@ -495,32 +524,52 @@ " if N is None:\n", " objective = problem.objective\n", " else:\n", - " objective = problem.objective.set_custom_timepoints(timepoints_global=np.linspace(0, 60, N))\n", + " objective = problem.objective.set_custom_timepoints(\n", + " timepoints_global=np.linspace(0, 60, N)\n", + " )\n", " if len(x) != len(problem.x_free_indices):\n", " x = x[problem.x_free_indices]\n", " simresult = objective(x, return_dict=True, **kwargs)\n", - " return problem, simresult['rdatas'][0]\n", + " return problem, simresult[\"rdatas\"][0]\n", + "\n", "\n", "def simulate_pEpoR(x=None, **kwargs):\n", " problem, rdata = _simulate(x, **kwargs)\n", - " assert problem.objective.amici_model.getObservableIds()[0].startswith('pEpoR')\n", - " return rdata['t'], rdata['y'][:, 0]\n", + " assert problem.objective.amici_model.getObservableIds()[0].startswith(\n", + " \"pEpoR\"\n", + " )\n", + " return rdata[\"t\"], rdata[\"y\"][:, 0]\n", + "\n", "\n", "def simulate_pSTAT5(x=None, **kwargs):\n", " problem, rdata = _simulate(x, **kwargs)\n", - " assert problem.objective.amici_model.getObservableIds()[1].startswith('pSTAT5')\n", - " return rdata['t'], rdata['y'][:, 1]\n", + " assert problem.objective.amici_model.getObservableIds()[1].startswith(\n", + " \"pSTAT5\"\n", + " )\n", + " return rdata[\"t\"], rdata[\"y\"][:, 1]\n", + "\n", "\n", "def simulate_tSTAT5(x=None, **kwargs):\n", " problem, rdata = _simulate(x, **kwargs)\n", - " assert problem.objective.amici_model.getObservableIds()[-1].startswith('tSTAT5')\n", - " return rdata['t'], rdata['y'][:, -1]\n", + " assert problem.objective.amici_model.getObservableIds()[-1].startswith(\n", + " \"tSTAT5\"\n", + " )\n", + " return rdata[\"t\"], rdata[\"y\"][:, -1]\n", + "\n", "\n", "# Experimental data\n", - "df_measurements = petab.measurements.get_measurement_df(os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv'))\n", - "df_pEpoR = df_measurements[df_measurements['observableId'].str.startswith('pEpoR')]\n", - "df_pSTAT5 = df_measurements[df_measurements['observableId'].str.startswith('pSTAT5')]\n", - "df_tSTAT5 = df_measurements[df_measurements['observableId'].str.startswith('tSTAT5')]" + "df_measurements = petab.measurements.get_measurement_df(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\")\n", + ")\n", + "df_pEpoR = df_measurements[\n", + " df_measurements[\"observableId\"].str.startswith(\"pEpoR\")\n", + "]\n", + "df_pSTAT5 = df_measurements[\n", + " df_measurements[\"observableId\"].str.startswith(\"pSTAT5\")\n", + "]\n", + "df_tSTAT5 = df_measurements[\n", + " df_measurements[\"observableId\"].str.startswith(\"tSTAT5\")\n", + "]" ] }, { @@ -547,11 +596,34 @@ "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", "t, pEpoR = simulate_pEpoR()\n", "sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n", - "ax.fill_between(t, pEpoR - 2*sigma_pEpoR, pEpoR + 2*sigma_pEpoR, color='black', alpha=0.10, interpolate=True, label='2-sigma error bands')\n", - "ax.plot(t, pEpoR, color='black', label='MLE')\n", - "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.fill_between(\n", + " t,\n", + " pEpoR - 2 * sigma_pEpoR,\n", + " pEpoR + 2 * sigma_pEpoR,\n", + " color=\"black\",\n", + " alpha=0.10,\n", + " interpolate=True,\n", + " 
label=\"2-sigma error bands\",\n", + ")\n", + "ax.plot(t, pEpoR, color=\"black\", label=\"MLE\")\n", + "ax.plot(\n", + " df_pEpoR[\"time\"],\n", + " df_pEpoR[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pEpoR\")\n", @@ -581,10 +653,25 @@ "# Plot ML fit for pSTAT5\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", "t, pSTAT5 = simulate_pSTAT5()\n", - "ax.plot(t, pSTAT5, color='black', label='MLE')\n", - "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(t, pSTAT5, color=\"black\", label=\"MLE\")\n", + "ax.plot(\n", + " df_pSTAT5[\"time\"],\n", + " df_pSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pSTAT5\")\n", @@ -614,10 +701,25 @@ "# Plot ML fit for tSTAT5\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", "t, tSTAT5 = simulate_tSTAT5()\n", - "ax.plot(t, tSTAT5, color='black', label='MLE')\n", - "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(t, tSTAT5, color=\"black\", label=\"MLE\")\n", + "ax.plot(\n", + " df_tSTAT5[\"time\"],\n", + " df_tSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"tSTAT5\")\n", @@ -634,7 +736,7 @@ "outputs": [], "source": [ "# Store results for later\n", - "all_results['5 nodes, FD'] = (pypesto_problem, pypesto_result)" + "all_results[\"5 nodes, FD\"] = (pypesto_problem, pypesto_result)" ] }, { @@ -680,9 +782,11 @@ "source": [ "# Create spline for pEpoR\n", "nodes = [0, 2.5, 5.0, 7.5, 10.0, 12.5, 15.0, 17.5, 20, 25, 30, 35, 40, 50, 60]\n", - "values_at_nodes = [sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes]\n", + "values_at_nodes = [\n", + " sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes\n", + "]\n", "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='pEpoR',\n", + " sbml_id=\"pEpoR\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=nodes,\n", " values_at_nodes=values_at_nodes,\n", @@ -726,17 +830,34 @@ 
"source": [ "# Add a parameter for regularization strength\n", "reg_parameters_df = pd.DataFrame(\n", - " dict(parameterScale='log10', lowerBound=1e-6, upperBound=1e6, nominalValue=1.0, estimate=0),\n", - " index=pd.Series(['regularization_strength'], name=\"parameterId\"),\n", + " dict(\n", + " parameterScale=\"log10\",\n", + " lowerBound=1e-6,\n", + " upperBound=1e6,\n", + " nominalValue=1.0,\n", + " estimate=0,\n", + " ),\n", + " index=pd.Series([\"regularization_strength\"], name=\"parameterId\"),\n", ")\n", "# Encode regularization term as an additional observable\n", "reg_observables_df = pd.DataFrame(\n", - " dict(observableFormula=f'sqrt({regularization})'.replace('**', '^'), observableTransformation='lin', noiseFormula='1/sqrt(regularization_strength)', noiseDistribution='normal'),\n", - " index=pd.Series(['regularization'], name=\"observableId\"),\n", + " dict(\n", + " observableFormula=f\"sqrt({regularization})\".replace(\"**\", \"^\"),\n", + " observableTransformation=\"lin\",\n", + " noiseFormula=\"1/sqrt(regularization_strength)\",\n", + " noiseDistribution=\"normal\",\n", + " ),\n", + " index=pd.Series([\"regularization\"], name=\"observableId\"),\n", ")\n", "# and correspoding measurement\n", "reg_measurements_df = pd.DataFrame(\n", - " dict(observableId='regularization', simulationConditionId='condition1', measurement=0, time=0, observableTransformation='lin'),\n", + " dict(\n", + " observableId=\"regularization\",\n", + " simulationConditionId=\"condition1\",\n", + " measurement=0,\n", + " time=0,\n", + " observableTransformation=\"lin\",\n", + " ),\n", " index=pd.Series([0]),\n", ")" ] @@ -751,9 +872,13 @@ "outputs": [], "source": [ "# Add spline formula to SBML model\n", - "sbml_doc = libsbml.SBMLReader().readSBML(os.path.join('Swameye_PNAS2003', 'swameye2003_model.xml'))\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_model.xml\")\n", + ")\n", "sbml_model = sbml_doc.getModel()\n", - "spline.add_to_sbml_model(sbml_model, auto_add=True, y_nominal=0.1, y_constant=True)" + "spline.add_to_sbml_model(\n", + " sbml_model, auto_add=True, y_nominal=0.1, y_constant=True\n", + ")" ] }, { @@ -767,7 +892,13 @@ "source": [ "# Extra parameters associated to the spline\n", "spline_parameters_df = pd.DataFrame(\n", - " dict(parameterScale='log', lowerBound=0.001, upperBound=10, nominalValue=0.1, estimate=1),\n", + " dict(\n", + " parameterScale=\"log\",\n", + " lowerBound=0.001,\n", + " upperBound=10,\n", + " nominalValue=0.1,\n", + " estimate=1,\n", + " ),\n", " index=pd.Series(list(map(str, values_at_nodes)), name=\"parameterId\"),\n", ")" ] @@ -784,18 +915,30 @@ "# Create PEtab problem\n", "petab_problem = petab.Problem(\n", " sbml_model,\n", - " condition_df=petab.conditions.get_condition_df(os.path.join('Swameye_PNAS2003', 'swameye2003_conditions.tsv')),\n", + " condition_df=petab.conditions.get_condition_df(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_conditions.tsv\")\n", + " ),\n", " measurement_df=petab.core.concat_tables(\n", - " [os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv'), reg_measurements_df],\n", - " petab.measurements.get_measurement_df\n", + " [\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\"),\n", + " reg_measurements_df,\n", + " ],\n", + " petab.measurements.get_measurement_df,\n", " ).reset_index(drop=True),\n", " parameter_df=petab.core.concat_tables(\n", - " [os.path.join('Swameye_PNAS2003', 'swameye2003_parameters.tsv'), spline_parameters_df, 
reg_parameters_df],\n", - " petab.parameters.get_parameter_df\n", + " [\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_parameters.tsv\"),\n", + " spline_parameters_df,\n", + " reg_parameters_df,\n", + " ],\n", + " petab.parameters.get_parameter_df,\n", " ),\n", " observable_df=petab.core.concat_tables(\n", - " [os.path.join('Swameye_PNAS2003', 'swameye2003_observables.tsv'), reg_observables_df],\n", - " petab.observables.get_observable_df\n", + " [\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_observables.tsv\"),\n", + " reg_observables_df,\n", + " ],\n", + " petab.observables.get_observable_df,\n", " ),\n", ")" ] @@ -875,7 +1018,9 @@ "outputs": [], "source": [ "# Import PEtab problem into pyPESTO\n", - "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()" + "pypesto_problem = pypesto.petab.PetabImporter(\n", + " petab_problem, model_name=name\n", + ").create_problem()" ] }, { @@ -898,7 +1043,7 @@ "source": [ "# Try different regularization strengths\n", "regstrengths = np.asarray([1, 10, 40, 75, 150, 500])\n", - "if os.getenv('GITHUB_ACTIONS') is not None:\n", + "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n", " regstrengths = np.asarray([75])\n", "regproblems = {}\n", "regresults = {}\n", @@ -907,14 +1052,16 @@ " # Fix parameter in pypesto problem\n", " name = f\"Swameye_PNAS2003_15nodes_FD_reg{regstrength}\"\n", " pypesto_problem.fix_parameters(\n", - " pypesto_problem.x_names.index('regularization_strength'),\n", - " np.log10(regstrength) # parameter is specified as log10 scale in PEtab\n", + " pypesto_problem.x_names.index(\"regularization_strength\"),\n", + " np.log10(\n", + " regstrength\n", + " ), # parameter is specified as log10 scale in PEtab\n", " )\n", " regproblem = copy.deepcopy(pypesto_problem)\n", "\n", " # Load existing results if available\n", - " if os.path.exists(f'{name}.h5'):\n", - " regresult = pypesto.store.read_result(f'{name}.h5', problem=regproblem)\n", + " if os.path.exists(f\"{name}.h5\"):\n", + " regresult = pypesto.store.read_result(f\"{name}.h5\", problem=regproblem)\n", " else:\n", " regresult = None\n", " # Overwrite\n", @@ -926,7 +1073,9 @@ " new_ids = [str(i) for i in range(n_starts)]\n", " else:\n", " last_id = max(int(i) for i in regresult.optimize_result.id)\n", - " new_ids = [str(i) for i in range(last_id+1, last_id+n_starts+1)]\n", + " new_ids = [\n", + " str(i) for i in range(last_id + 1, last_id + n_starts + 1)\n", + " ]\n", " regresult = pypesto.optimize.minimize(\n", " regproblem,\n", " n_starts=n_starts,\n", @@ -937,7 +1086,9 @@ " )\n", " regresult.optimize_result.sort()\n", " if regresult.optimize_result.x[0] is None:\n", - " raise Exception(\"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\")\n", + " raise Exception(\n", + " \"All multistarts failed (n_starts is probably too small)! 
If this error occurred during CI, just run the workflow again.\"\n", + " )\n", "\n", " # Save results to disk\n", " # pypesto.store.write_result(regresult, f'{name}.h5', overwrite=True)\n", @@ -975,15 +1126,21 @@ "regstrengths = sorted(regproblems.keys())\n", "stats = []\n", "for regstrength in regstrengths:\n", - " t, pEpoR = simulate_pEpoR(N=None, problem=regproblems[regstrength], result=regresults[regstrength])\n", - " assert np.array_equal(df_pEpoR['time'], t[:-1])\n", + " t, pEpoR = simulate_pEpoR(\n", + " N=None,\n", + " problem=regproblems[regstrength],\n", + " result=regresults[regstrength],\n", + " )\n", + " assert np.array_equal(df_pEpoR[\"time\"], t[:-1])\n", " pEpoR = pEpoR[:-1]\n", " sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n", - " stat = np.sum(((pEpoR - df_pEpoR['measurement']) / sigma_pEpoR)**2)\n", + " stat = np.sum(((pEpoR - df_pEpoR[\"measurement\"]) / sigma_pEpoR) ** 2)\n", " print(f\"Regularization strength: {regstrength}. Statistic is {stat}\")\n", " stats.append(stat)\n", "# Select best regularization strength\n", - "chosen_regstrength = regstrengths[np.abs(np.asarray(stats) - len(df_pEpoR['time'])).argmin()]" + "chosen_regstrength = regstrengths[\n", + " np.abs(np.asarray(stats) - len(df_pEpoR[\"time\"])).argmin()\n", + "]" ] }, { @@ -1007,8 +1164,12 @@ ], "source": [ "# Visualize the results of the multistarts for a chosen regularization strength\n", - "ax = pypesto.visualize.waterfall(regresults[chosen_regstrength], size=[6.5, 3.5])\n", - "ax.set_title(f\"Waterfall plot (regularization strength = {chosen_regstrength})\")\n", + "ax = pypesto.visualize.waterfall(\n", + " regresults[chosen_regstrength], size=[6.5, 3.5]\n", + ")\n", + "ax.set_title(\n", + " f\"Waterfall plot (regularization strength = {chosen_regstrength})\"\n", + ")\n", "ax.set_ylim(ax.get_ylim()[0], 100);" ] }, @@ -1035,15 +1196,36 @@ "# Plot ML fit for pEpoR (all regularization strengths)\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", "for regstrength in sorted(regproblems.keys()):\n", - " t, pEpoR = simulate_pEpoR(problem=regproblems[regstrength], result=regresults[regstrength])\n", + " t, pEpoR = simulate_pEpoR(\n", + " problem=regproblems[regstrength], result=regresults[regstrength]\n", + " )\n", " if regstrength == chosen_regstrength:\n", - " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n", + " kwargs = dict(\n", + " color=\"black\",\n", + " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n", + " zorder=2,\n", + " )\n", " else:\n", - " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n", + " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n", " ax.plot(t, pEpoR, **kwargs)\n", - "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_pEpoR[\"time\"],\n", + " df_pEpoR[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pEpoR\")\n", @@ -1079,19 +1261,40 @@ "# Plot ML fit for pSTAT5 (all regularization strengths)\n", "fig, ax = 
plt.subplots(figsize=(6.5, 3.5))\n", "for regstrength in sorted(regproblems.keys()):\n", - " t, pSTAT5 = simulate_pSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n", + " t, pSTAT5 = simulate_pSTAT5(\n", + " problem=regproblems[regstrength], result=regresults[regstrength]\n", + " )\n", " if regstrength == chosen_regstrength:\n", - " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n", + " kwargs = dict(\n", + " color=\"black\",\n", + " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n", + " zorder=2,\n", + " )\n", " else:\n", - " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n", + " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n", " ax.plot(t, pSTAT5, **kwargs)\n", - "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_pSTAT5[\"time\"],\n", + " df_pSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pSTAT5\");\n", - "#ax.legend();" + "# ax.legend();" ] }, { @@ -1117,19 +1320,40 @@ "# Plot ML fit for tSTAT5 (all regularization strengths)\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", "for regstrength in sorted(regproblems.keys()):\n", - " t, tSTAT5 = simulate_tSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n", + " t, tSTAT5 = simulate_tSTAT5(\n", + " problem=regproblems[regstrength], result=regresults[regstrength]\n", + " )\n", " if regstrength == chosen_regstrength:\n", - " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n", + " kwargs = dict(\n", + " color=\"black\",\n", + " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n", + " zorder=2,\n", + " )\n", " else:\n", - " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n", + " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n", " ax.plot(t, tSTAT5, **kwargs)\n", - "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_tSTAT5[\"time\"],\n", + " df_tSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"tSTAT5\");\n", - "#ax.legend();" + "# ax.legend();" ] }, { @@ -1154,13 +1378,39 @@ "source": [ "# Plot ML fit for pEpoR (single regularization strength with noise model)\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", - "t, pEpoR = simulate_pEpoR(problem=regproblems[chosen_regstrength], result=regresults[chosen_regstrength])\n", + 
"t, pEpoR = simulate_pEpoR(\n", + " problem=regproblems[chosen_regstrength],\n", + " result=regresults[chosen_regstrength],\n", + ")\n", "sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n", - "ax.fill_between(t, pEpoR - 2*sigma_pEpoR, pEpoR + 2*sigma_pEpoR, color='black', alpha=0.10, interpolate=True, label='2-sigma error bands')\n", - "ax.plot(t, pEpoR, color='black', label='MLE')\n", - "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.fill_between(\n", + " t,\n", + " pEpoR - 2 * sigma_pEpoR,\n", + " pEpoR + 2 * sigma_pEpoR,\n", + " color=\"black\",\n", + " alpha=0.10,\n", + " interpolate=True,\n", + " label=\"2-sigma error bands\",\n", + ")\n", + "ax.plot(t, pEpoR, color=\"black\", label=\"MLE\")\n", + "ax.plot(\n", + " df_pEpoR[\"time\"],\n", + " df_pEpoR[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pEpoR\")\n", @@ -1178,7 +1428,10 @@ "outputs": [], "source": [ "# Store results for later\n", - "all_results['15 nodes, FD'] = (regproblems[chosen_regstrength], regresults[chosen_regstrength])" + "all_results[\"15 nodes, FD\"] = (\n", + " regproblems[chosen_regstrength],\n", + " regresults[chosen_regstrength],\n", + ")" ] }, { @@ -1232,14 +1485,20 @@ "source": [ "# Create spline for pEpoR\n", "nodes = [0, 5, 10, 20, 60]\n", - "values_at_nodes = [sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes]\n", - "derivatives_at_nodes = [sp.Symbol(f\"derivative_pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes[:-1]]\n", + "values_at_nodes = [\n", + " sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes\n", + "]\n", + "derivatives_at_nodes = [\n", + " sp.Symbol(f\"derivative_pEpoR_t{str(t).replace('.', '_dot_')}\")\n", + " for t in nodes[:-1]\n", + "]\n", "spline = amici.splines.CubicHermiteSpline(\n", - " sbml_id='pEpoR',\n", + " sbml_id=\"pEpoR\",\n", " evaluate_at=amici.sbml_utils.amici_time_symbol,\n", " nodes=nodes,\n", " values_at_nodes=values_at_nodes,\n", - " derivatives_at_nodes=derivatives_at_nodes + [0], # last value is zero because steady state is reached\n", + " derivatives_at_nodes=derivatives_at_nodes\n", + " + [0], # last value is zero because steady state is reached\n", " extrapolate=(None, \"constant\"),\n", " bc=\"auto\",\n", " logarithmic_parametrization=True,\n", @@ -1270,17 +1529,34 @@ "source": [ "# Add a parameter for regularization strength\n", "reg_parameters_df = pd.DataFrame(\n", - " dict(parameterScale='log10', lowerBound=1e-6, upperBound=1e6, nominalValue=1.0, estimate=0),\n", - " index=pd.Series(['regularization_strength'], name=\"parameterId\"),\n", + " dict(\n", + " parameterScale=\"log10\",\n", + " lowerBound=1e-6,\n", + " upperBound=1e6,\n", + " nominalValue=1.0,\n", + " estimate=0,\n", + " ),\n", + " index=pd.Series([\"regularization_strength\"], name=\"parameterId\"),\n", ")\n", "# Encode regularization term as an additional observable\n", "reg_observables_df = pd.DataFrame(\n", - " dict(observableFormula=f'sqrt({regularization})'.replace('**', '^'), 
observableTransformation='lin', noiseFormula='1/sqrt(regularization_strength)', noiseDistribution='normal'),\n", - " index=pd.Series(['regularization'], name=\"observableId\"),\n", + " dict(\n", + " observableFormula=f\"sqrt({regularization})\".replace(\"**\", \"^\"),\n", + " observableTransformation=\"lin\",\n", + " noiseFormula=\"1/sqrt(regularization_strength)\",\n", + " noiseDistribution=\"normal\",\n", + " ),\n", + " index=pd.Series([\"regularization\"], name=\"observableId\"),\n", ")\n", "# and correspoding measurement\n", "reg_measurements_df = pd.DataFrame(\n", - " dict(observableId='regularization', simulationConditionId='condition1', measurement=0, time=0, observableTransformation='lin'),\n", + " dict(\n", + " observableId=\"regularization\",\n", + " simulationConditionId=\"condition1\",\n", + " measurement=0,\n", + " time=0,\n", + " observableTransformation=\"lin\",\n", + " ),\n", " index=pd.Series([0]),\n", ")" ] @@ -1295,9 +1571,13 @@ "outputs": [], "source": [ "# Add spline formula to SBML model\n", - "sbml_doc = libsbml.SBMLReader().readSBML(os.path.join('Swameye_PNAS2003', 'swameye2003_model.xml'))\n", + "sbml_doc = libsbml.SBMLReader().readSBML(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_model.xml\")\n", + ")\n", "sbml_model = sbml_doc.getModel()\n", - "spline.add_to_sbml_model(sbml_model, auto_add=True, y_nominal=0.1, y_constant=True)" + "spline.add_to_sbml_model(\n", + " sbml_model, auto_add=True, y_nominal=0.1, y_constant=True\n", + ")" ] }, { @@ -1325,11 +1605,23 @@ "source": [ "# Extra parameters associated to the spline\n", "spline_parameters_df1 = pd.DataFrame(\n", - " dict(parameterScale='log', lowerBound=0.001, upperBound=10, nominalValue=0.1, estimate=1),\n", + " dict(\n", + " parameterScale=\"log\",\n", + " lowerBound=0.001,\n", + " upperBound=10,\n", + " nominalValue=0.1,\n", + " estimate=1,\n", + " ),\n", " index=pd.Series(list(map(str, values_at_nodes)), name=\"parameterId\"),\n", ")\n", "spline_parameters_df2 = pd.DataFrame(\n", - " dict(parameterScale='lin', lowerBound=-0.666, upperBound=0.666, nominalValue=0.0, estimate=1),\n", + " dict(\n", + " parameterScale=\"lin\",\n", + " lowerBound=-0.666,\n", + " upperBound=0.666,\n", + " nominalValue=0.0,\n", + " estimate=1,\n", + " ),\n", " index=pd.Series(list(map(str, derivatives_at_nodes)), name=\"parameterId\"),\n", ")" ] @@ -1346,18 +1638,31 @@ "# Create PEtab problem\n", "petab_problem = petab.Problem(\n", " sbml_model,\n", - " condition_df=petab.conditions.get_condition_df(os.path.join('Swameye_PNAS2003', 'swameye2003_conditions.tsv')),\n", + " condition_df=petab.conditions.get_condition_df(\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_conditions.tsv\")\n", + " ),\n", " measurement_df=petab.core.concat_tables(\n", - " [os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv'), reg_measurements_df],\n", - " petab.measurements.get_measurement_df\n", + " [\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\"),\n", + " reg_measurements_df,\n", + " ],\n", + " petab.measurements.get_measurement_df,\n", " ).reset_index(drop=True),\n", " parameter_df=petab.core.concat_tables(\n", - " [os.path.join('Swameye_PNAS2003', 'swameye2003_parameters.tsv'), spline_parameters_df1, spline_parameters_df2, reg_parameters_df],\n", - " petab.parameters.get_parameter_df\n", + " [\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_parameters.tsv\"),\n", + " spline_parameters_df1,\n", + " spline_parameters_df2,\n", + " reg_parameters_df,\n", + " ],\n", + " 
petab.parameters.get_parameter_df,\n", " ),\n", " observable_df=petab.core.concat_tables(\n", - " [os.path.join('Swameye_PNAS2003', 'swameye2003_observables.tsv'), reg_observables_df],\n", - " petab.observables.get_observable_df\n", + " [\n", + " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_observables.tsv\"),\n", + " reg_observables_df,\n", + " ],\n", + " petab.observables.get_observable_df,\n", " ),\n", ")" ] @@ -1437,7 +1742,9 @@ "outputs": [], "source": [ "# Import PEtab problem into pyPESTO\n", - "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()" + "pypesto_problem = pypesto.petab.PetabImporter(\n", + " petab_problem, model_name=name\n", + ").create_problem()" ] }, { @@ -1459,7 +1766,7 @@ "source": [ "# Try different regularization strengths\n", "regstrengths = np.asarray([1, 175, 500, 1000])\n", - "if os.getenv('GITHUB_ACTIONS') is not None:\n", + "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n", " regstrengths = np.asarray([175])\n", "regproblems = {}\n", "regresults = {}\n", @@ -1468,14 +1775,16 @@ " # Fix parameter in pypesto problem\n", " name = f\"Swameye_PNAS2003_5nodes_reg{regstrength}\"\n", " pypesto_problem.fix_parameters(\n", - " pypesto_problem.x_names.index('regularization_strength'),\n", - " np.log10(regstrength) # parameter is specified as log10 scale in PEtab\n", + " pypesto_problem.x_names.index(\"regularization_strength\"),\n", + " np.log10(\n", + " regstrength\n", + " ), # parameter is specified as log10 scale in PEtab\n", " )\n", " regproblem = copy.deepcopy(pypesto_problem)\n", "\n", " # Load existing results if available\n", - " if os.path.exists(f'{name}.h5'):\n", - " regresult = pypesto.store.read_result(f'{name}.h5', problem=regproblem)\n", + " if os.path.exists(f\"{name}.h5\"):\n", + " regresult = pypesto.store.read_result(f\"{name}.h5\", problem=regproblem)\n", " else:\n", " regresult = None\n", " # Overwrite\n", @@ -1487,7 +1796,9 @@ " new_ids = [str(i) for i in range(n_starts)]\n", " else:\n", " last_id = max(int(i) for i in regresult.optimize_result.id)\n", - " new_ids = [str(i) for i in range(last_id+1, last_id+n_starts+1)]\n", + " new_ids = [\n", + " str(i) for i in range(last_id + 1, last_id + n_starts + 1)\n", + " ]\n", " regresult = pypesto.optimize.minimize(\n", " regproblem,\n", " n_starts=n_starts,\n", @@ -1498,7 +1809,9 @@ " )\n", " regresult.optimize_result.sort()\n", " if regresult.optimize_result.x[0] is None:\n", - " raise Exception(\"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\")\n", + " raise Exception(\n", + " \"All multistarts failed (n_starts is probably too small)! 
If this error occurred during CI, just run the workflow again.\"\n", + " )\n", "\n", " # Save results to disk\n", " # pypesto.store.write_result(regresult, f'{name}.h5', overwrite=True)\n", @@ -1534,15 +1847,21 @@ "regstrengths = sorted(regproblems.keys())\n", "stats = []\n", "for regstrength in regstrengths:\n", - " t, pEpoR = simulate_pEpoR(N=None, problem=regproblems[regstrength], result=regresults[regstrength])\n", - " assert np.array_equal(df_pEpoR['time'], t[:-1])\n", + " t, pEpoR = simulate_pEpoR(\n", + " N=None,\n", + " problem=regproblems[regstrength],\n", + " result=regresults[regstrength],\n", + " )\n", + " assert np.array_equal(df_pEpoR[\"time\"], t[:-1])\n", " pEpoR = pEpoR[:-1]\n", " sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n", - " stat = np.sum(((pEpoR - df_pEpoR['measurement']) / sigma_pEpoR)**2)\n", + " stat = np.sum(((pEpoR - df_pEpoR[\"measurement\"]) / sigma_pEpoR) ** 2)\n", " print(f\"Regularization strength: {regstrength}. Statistic is {stat}\")\n", " stats.append(stat)\n", "# Select best regularization strength\n", - "chosen_regstrength = regstrengths[np.abs(np.asarray(stats) - len(df_pEpoR['time'])).argmin()]" + "chosen_regstrength = regstrengths[\n", + " np.abs(np.asarray(stats) - len(df_pEpoR[\"time\"])).argmin()\n", + "]" ] }, { @@ -1566,8 +1885,12 @@ ], "source": [ "# Visualize the results of the multistarts for a chosen regularization strength\n", - "ax = pypesto.visualize.waterfall(regresults[chosen_regstrength], size=[6.5, 3.5])\n", - "ax.set_title(f\"Waterfall plot (regularization strength = {chosen_regstrength})\")\n", + "ax = pypesto.visualize.waterfall(\n", + " regresults[chosen_regstrength], size=[6.5, 3.5]\n", + ")\n", + "ax.set_title(\n", + " f\"Waterfall plot (regularization strength = {chosen_regstrength})\"\n", + ")\n", "ax.set_ylim(ax.get_ylim()[0], 100);" ] }, @@ -1594,15 +1917,36 @@ "# Plot ML fit for pEpoR (all regularization strengths)\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", "for regstrength in sorted(regproblems.keys()):\n", - " t, pEpoR = simulate_pEpoR(problem=regproblems[regstrength], result=regresults[regstrength])\n", + " t, pEpoR = simulate_pEpoR(\n", + " problem=regproblems[regstrength], result=regresults[regstrength]\n", + " )\n", " if regstrength == chosen_regstrength:\n", - " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n", + " kwargs = dict(\n", + " color=\"black\",\n", + " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n", + " zorder=2,\n", + " )\n", " else:\n", - " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n", + " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n", " ax.plot(t, pEpoR, **kwargs)\n", - "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_pEpoR[\"time\"],\n", + " df_pEpoR[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pEpoR\")\n", @@ -1637,15 +1981,36 @@ "# Plot ML fit for pSTAT5 (all regularization strengths)\n", "fig, ax = 
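
Aside (not part of the diff): the selection loop above picks regularization_strength by a discrepancy-principle heuristic. The weighted residual sum of squares sum(((y_sim - y_meas) / sigma)**2) has expectation N for N data points when the noise model is correct, so the strength whose statistic lands closest to N is kept. A standalone sketch of just that rule; choose_regstrength is an illustrative helper, not notebook code:

import numpy as np

def choose_regstrength(regstrengths, stats, n_data):
    # Discrepancy principle: the weighted residual sum of squares should be
    # close to its expected value E[chi2_N] = N under a correct noise model.
    stats = np.asarray(stats, dtype=float)
    return regstrengths[int(np.abs(stats - n_data).argmin())]

# With 10 data points, a statistic of 9.8 beats 3.1 (overfit) and 30.2 (oversmoothed):
assert choose_regstrength([1, 10, 100], [3.1, 9.8, 30.2], n_data=10) == 10
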
plt.subplots(figsize=(6.5, 3.5))\n", "for regstrength in sorted(regproblems.keys()):\n", - " t, pSTAT5 = simulate_pSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n", + " t, pSTAT5 = simulate_pSTAT5(\n", + " problem=regproblems[regstrength], result=regresults[regstrength]\n", + " )\n", " if regstrength == chosen_regstrength:\n", - " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n", + " kwargs = dict(\n", + " color=\"black\",\n", + " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n", + " zorder=2,\n", + " )\n", " else:\n", - " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n", + " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n", " ax.plot(t, pSTAT5, **kwargs)\n", - "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_pSTAT5[\"time\"],\n", + " df_pSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pSTAT5\");\n", @@ -1675,15 +2040,36 @@ "# Plot ML fit for tSTAT5 (all regularization strengths)\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", "for regstrength in sorted(regproblems.keys()):\n", - " t, tSTAT5 = simulate_tSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n", + " t, tSTAT5 = simulate_tSTAT5(\n", + " problem=regproblems[regstrength], result=regresults[regstrength]\n", + " )\n", " if regstrength == chosen_regstrength:\n", - " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n", + " kwargs = dict(\n", + " color=\"black\",\n", + " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n", + " zorder=2,\n", + " )\n", " else:\n", - " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n", + " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n", " ax.plot(t, tSTAT5, **kwargs)\n", - "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_tSTAT5[\"time\"],\n", + " df_tSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"tSTAT5\");\n", @@ -1712,13 +2098,39 @@ "source": [ "# Plot ML fit for pEpoR (single regularization strength with noise model)\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", - "t, pEpoR = simulate_pEpoR(problem=regproblems[chosen_regstrength], result=regresults[chosen_regstrength])\n", + "t, pEpoR = simulate_pEpoR(\n", + " problem=regproblems[chosen_regstrength],\n", + " 
result=regresults[chosen_regstrength],\n", + ")\n", "sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n", - "ax.fill_between(t, pEpoR - 2*sigma_pEpoR, pEpoR + 2*sigma_pEpoR, color='black', alpha=0.10, interpolate=True, label='2-sigma error bands')\n", - "ax.plot(t, pEpoR, color='black', label='MLE')\n", - "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.fill_between(\n", + " t,\n", + " pEpoR - 2 * sigma_pEpoR,\n", + " pEpoR + 2 * sigma_pEpoR,\n", + " color=\"black\",\n", + " alpha=0.10,\n", + " interpolate=True,\n", + " label=\"2-sigma error bands\",\n", + ")\n", + "ax.plot(t, pEpoR, color=\"black\", label=\"MLE\")\n", + "ax.plot(\n", + " df_pEpoR[\"time\"],\n", + " df_pEpoR[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ylim1 = ax.get_ylim()[0]\n", - "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n", + "ax.plot(\n", + " nodes,\n", + " len(nodes) * [ylim1],\n", + " \"x\",\n", + " color=\"black\",\n", + " label=\"spline nodes\",\n", + " zorder=10,\n", + " clip_on=False,\n", + ")\n", "ax.set_ylim(ylim1, ax.get_ylim()[1])\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pEpoR\")\n", @@ -1736,7 +2148,10 @@ "outputs": [], "source": [ "# Store results for later\n", - "all_results['5 nodes'] = (regproblems[chosen_regstrength], regresults[chosen_regstrength])" + "all_results[\"5 nodes\"] = (\n", + " regproblems[chosen_regstrength],\n", + " regresults[chosen_regstrength],\n", + ")" ] }, { @@ -1769,10 +2184,17 @@ "source": [ "# Plot ML fit for pEpoR\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", - "for (label, (problem, result)) in all_results.items():\n", + "for label, (problem, result) in all_results.items():\n", " t, pEpoR = simulate_pEpoR(problem=problem, result=result)\n", " ax.plot(t, pEpoR, label=label)\n", - "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_pEpoR[\"time\"],\n", + " df_pEpoR[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pEpoR\")\n", "ax.legend();" @@ -1800,10 +2222,17 @@ "source": [ "# Plot ML fit for pSTAT5\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", - "for (label, (problem, result)) in all_results.items():\n", + "for label, (problem, result) in all_results.items():\n", " t, pSTAT5 = simulate_pSTAT5(problem=problem, result=result)\n", " ax.plot(t, pSTAT5, label=label)\n", - "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n", + "ax.plot(\n", + " df_pSTAT5[\"time\"],\n", + " df_pSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"pSTAT5\")\n", "ax.legend();" @@ -1831,10 +2260,17 @@ "source": [ "# Plot ML fit for tSTAT5\n", "fig, ax = plt.subplots(figsize=(6.5, 3.5))\n", - "for (label, (problem, result)) in all_results.items():\n", + "for label, (problem, result) in all_results.items():\n", " t, tSTAT5 = simulate_tSTAT5(problem=problem, result=result)\n", " ax.plot(t, tSTAT5, label=label)\n", - "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', 
label='experimental data')\n", + "ax.plot(\n", + " df_tSTAT5[\"time\"],\n", + " df_tSTAT5[\"measurement\"],\n", + " \"o\",\n", + " color=\"black\",\n", + " markerfacecolor=\"none\",\n", + " label=\"experimental data\",\n", + ")\n", "ax.set_xlabel(\"time\")\n", "ax.set_ylabel(\"tSTAT5\")\n", "ax.legend();" @@ -1918,14 +2354,14 @@ ], "source": [ "# Compare parameter values\n", - "for (label, (problem, result)) in all_results.items():\n", + "for label, (problem, result) in all_results.items():\n", " print(f\"\\n### {label}\")\n", " x = result.optimize_result.x[0]\n", " if len(x) == len(problem.x_free_indices):\n", " names = problem.x_names[problem.x_free_indices]\n", " else:\n", " names = problem.x_names\n", - " for (name, value) in zip(names, x):\n", + " for name, value in zip(names, x):\n", " print(f\"{name} = {value}\")" ] }, diff --git a/python/examples/example_steadystate/ExampleSteadystate.ipynb b/python/examples/example_steadystate/ExampleSteadystate.ipynb index 0d9765e727..502174fe15 100644 --- a/python/examples/example_steadystate/ExampleSteadystate.ipynb +++ b/python/examples/example_steadystate/ExampleSteadystate.ipynb @@ -16,9 +16,9 @@ "outputs": [], "source": [ "# SBML model we want to import\n", - "sbml_file = 'model_steadystate_scaled_without_observables.xml'\n", + "sbml_file = \"model_steadystate_scaled_without_observables.xml\"\n", "# Name of the model that will also be the name of the python module\n", - "model_name = 'model_steadystate_scaled'\n", + "model_name = \"model_steadystate_scaled\"\n", "# Directory to which the generated model code is written\n", "model_output_dir = model_name\n", "\n", @@ -67,18 +67,41 @@ "sbml_model = sbml_doc.getModel()\n", "dir(sbml_doc)\n", "\n", - "print('Species: ', [s.getId() for s in sbml_model.getListOfSpecies()])\n", + "print(\"Species: \", [s.getId() for s in sbml_model.getListOfSpecies()])\n", "\n", - "print('\\nReactions:')\n", + "print(\"\\nReactions:\")\n", "for reaction in sbml_model.getListOfReactions():\n", - " reactants = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfReactants()])\n", - " products = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfProducts()])\n", - " reversible = '<' if reaction.getReversible() else ''\n", - " print('%3s: %10s %1s->%10s\\t\\t[%s]' % (reaction.getId(),\n", - " reactants,\n", - " reversible,\n", - " products,\n", - " libsbml.formulaToL3String(reaction.getKineticLaw().getMath())))\n" + " reactants = \" + \".join(\n", + " [\n", + " \"%s %s\"\n", + " % (\n", + " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n", + " r.getSpecies(),\n", + " )\n", + " for r in reaction.getListOfReactants()\n", + " ]\n", + " )\n", + " products = \" + \".join(\n", + " [\n", + " \"%s %s\"\n", + " % (\n", + " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n", + " r.getSpecies(),\n", + " )\n", + " for r in reaction.getListOfProducts()\n", + " ]\n", + " )\n", + " reversible = \"<\" if reaction.getReversible() else \"\"\n", + " print(\n", + " \"%3s: %10s %1s->%10s\\t\\t[%s]\"\n", + " % (\n", + " reaction.getId(),\n", + " reactants,\n", + " reversible,\n", + " products,\n", + " libsbml.formulaToL3String(reaction.getKineticLaw().getMath()),\n", + " )\n", + " )" ] }, { @@ -122,7 +145,7 @@ "metadata": {}, "outputs": [], "source": [ - "constantParameters = ['k0']" + "constantParameters = [\"k0\"]" ] }, { @@ -144,12 +167,12 @@ "source": [ 
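
Aside (not part of the diff): the cell below defines the observables dict passed to sbml2amici. Once the model is built, the columns of rdata["y"] follow the order of that dict. A short usage sketch, under the assumption that the model has already been compiled into the output directory this notebook uses ("model_steadystate_scaled"); it is not notebook code:

import amici
import numpy as np

# Load the previously generated model module and simulate
model_module = amici.import_model_module(
    "model_steadystate_scaled", "model_steadystate_scaled"
)
model = model_module.getModel()
model.setTimepoints(np.linspace(0, 60, 61))
rdata = amici.runAmiciSimulation(model, model.getSolver())

# One column of rdata["y"] per observable, in dict order
for iy, obs_id in enumerate(model.getObservableIds()):
    print(obs_id, rdata["y"][-1, iy])
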
"# Define observables\n", "observables = {\n", - " 'observable_x1': {'name': '', 'formula': 'x1'},\n", - " 'observable_x2': {'name': '', 'formula': 'x2'},\n", - " 'observable_x3': {'name': '', 'formula': 'x3'},\n", - " 'observable_x1_scaled': {'name': '', 'formula': 'scaling_x1 * x1'},\n", - " 'observable_x2_offsetted': {'name': '', 'formula': 'offset_x2 + x2'},\n", - " 'observable_x1withsigma': {'name': '', 'formula': 'x1'}\n", + " \"observable_x1\": {\"name\": \"\", \"formula\": \"x1\"},\n", + " \"observable_x2\": {\"name\": \"\", \"formula\": \"x2\"},\n", + " \"observable_x3\": {\"name\": \"\", \"formula\": \"x3\"},\n", + " \"observable_x1_scaled\": {\"name\": \"\", \"formula\": \"scaling_x1 * x1\"},\n", + " \"observable_x2_offsetted\": {\"name\": \"\", \"formula\": \"offset_x2 + x2\"},\n", + " \"observable_x1withsigma\": {\"name\": \"\", \"formula\": \"x1\"},\n", "}" ] }, @@ -168,7 +191,7 @@ "metadata": {}, "outputs": [], "source": [ - "sigmas = {'observable_x1withsigma': 'observable_x1withsigma_sigma'}" + "sigmas = {\"observable_x1withsigma\": \"observable_x1withsigma_sigma\"}" ] }, { @@ -312,12 +335,15 @@ ], "source": [ "import logging\n", - "sbml_importer.sbml2amici(model_name,\n", - " model_output_dir,\n", - " verbose=logging.INFO,\n", - " observables=observables,\n", - " constant_parameters=constantParameters,\n", - " sigmas=sigmas)" + "\n", + "sbml_importer.sbml2amici(\n", + " model_name,\n", + " model_output_dir,\n", + " verbose=logging.INFO,\n", + " observables=observables,\n", + " constant_parameters=constantParameters,\n", + " sigmas=sigmas,\n", + ")" ] }, { @@ -418,7 +444,9 @@ } ], "source": [ - "print('Simulation was run using model default parameters as specified in the SBML model:')\n", + "print(\n", + " \"Simulation was run using model default parameters as specified in the SBML model:\"\n", + ")\n", "print(model.getParameters())" ] }, @@ -827,9 +855,9 @@ } ], "source": [ - "#np.set_printoptions(threshold=8, edgeitems=2)\n", + "# np.set_printoptions(threshold=8, edgeitems=2)\n", "for key, value in rdata.items():\n", - " print('%12s: ' % key, value)" + " print(\"%12s: \" % key, value)" ] }, { @@ -891,8 +919,9 @@ ], "source": [ "import amici.plotting\n", - "amici.plotting.plotStateTrajectories(rdata, model = None)\n", - "amici.plotting.plotObservableTrajectories(rdata, model = None)" + "\n", + "amici.plotting.plotStateTrajectories(rdata, model=None)\n", + "amici.plotting.plotObservableTrajectories(rdata, model=None)" ] }, { @@ -934,7 +963,7 @@ "# Re-run simulation, this time passing \"experimental data\"\n", "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "\n", - "print('Log-likelihood %f' % rdata['llh'])" + "print(\"Log-likelihood %f\" % rdata[\"llh\"])" ] }, { @@ -967,9 +996,13 @@ "solver.setSensitivityOrder(amici.SensitivityOrder.none)\n", "rdata_ref = amici.runAmiciSimulation(model, solver, edata)\n", "\n", + "\n", "def get_simulation_error(solver):\n", " rdata = amici.runAmiciSimulation(model, solver, edata)\n", - " return np.mean(np.abs(rdata['x']-rdata_ref['x'])), np.mean(np.abs(rdata['llh']-rdata_ref['llh']))\n", + " return np.mean(np.abs(rdata[\"x\"] - rdata_ref[\"x\"])), np.mean(\n", + " np.abs(rdata[\"llh\"] - rdata_ref[\"llh\"])\n", + " )\n", + "\n", "\n", "def get_errors(tolfun, tols):\n", " solver.setRelativeTolerance(1e-16)\n", @@ -983,25 +1016,28 @@ " llh_errs.append(llh_err)\n", " return x_errs, llh_errs\n", "\n", - "atols = np.logspace(-5,-15, 100)\n", - "atol_x_errs, atol_llh_errs = get_errors('setAbsoluteTolerance', atols)\n", "\n", - "rtols 
= np.logspace(-5,-15, 100)\n", - "rtol_x_errs, rtol_llh_errs = get_errors('setRelativeTolerance', rtols)\n", + "atols = np.logspace(-5, -15, 100)\n", + "atol_x_errs, atol_llh_errs = get_errors(\"setAbsoluteTolerance\", atols)\n", + "\n", + "rtols = np.logspace(-5, -15, 100)\n", + "rtol_x_errs, rtol_llh_errs = get_errors(\"setRelativeTolerance\", rtols)\n", "\n", "fig, axes = plt.subplots(1, 2, figsize=(15, 5))\n", "\n", + "\n", "def plot_error(tols, x_errs, llh_errs, tolname, ax):\n", - " ax.plot(tols, x_errs, 'r-', label='x')\n", - " ax.plot(tols, llh_errs, 'b-', label='llh')\n", - " ax.set_xscale('log')\n", - " ax.set_yscale('log')\n", - " ax.set_xlabel(f'{tolname} tolerance')\n", - " ax.set_ylabel('average numerical error')\n", + " ax.plot(tols, x_errs, \"r-\", label=\"x\")\n", + " ax.plot(tols, llh_errs, \"b-\", label=\"llh\")\n", + " ax.set_xscale(\"log\")\n", + " ax.set_yscale(\"log\")\n", + " ax.set_xlabel(f\"{tolname} tolerance\")\n", + " ax.set_ylabel(\"average numerical error\")\n", " ax.legend()\n", "\n", - "plot_error(atols, atol_x_errs, atol_llh_errs, 'absolute', axes[0])\n", - "plot_error(rtols, rtol_x_errs, rtol_llh_errs, 'relative', axes[1])\n", + "\n", + "plot_error(atols, atol_x_errs, atol_llh_errs, \"absolute\", axes[0])\n", + "plot_error(rtols, rtol_x_errs, rtol_llh_errs, \"relative\", axes[1])\n", "\n", "# reset relative tolerance to default value\n", "solver.setRelativeTolerance(1e-8)\n", @@ -1523,21 +1559,27 @@ "source": [ "model = model_module.getModel()\n", "model.setTimepoints(np.linspace(0, 10, 11))\n", - "model.requireSensitivitiesForAllParameters() # sensitivities w.r.t. all parameters\n", + "model.requireSensitivitiesForAllParameters() # sensitivities w.r.t. all parameters\n", "# model.setParameterList([1, 2]) # sensitivities\n", "# w.r.t. 
the specified parameters\n", - "model.setParameterScale(amici.ParameterScaling.none) # parameters are used as-is (not log-transformed)\n", + "model.setParameterScale(\n", + " amici.ParameterScaling.none\n", + ") # parameters are used as-is (not log-transformed)\n", "\n", "solver = model.getSolver()\n", - "solver.setSensitivityMethod(amici.SensitivityMethod.forward) # forward sensitivity analysis\n", - "solver.setSensitivityOrder(amici.SensitivityOrder.first) # first-order sensitivities\n", + "solver.setSensitivityMethod(\n", + " amici.SensitivityMethod.forward\n", + ") # forward sensitivity analysis\n", + "solver.setSensitivityOrder(\n", + " amici.SensitivityOrder.first\n", + ") # first-order sensitivities\n", "\n", "rdata = amici.runAmiciSimulation(model, solver)\n", "\n", "# print sensitivity-related results\n", "for key, value in rdata.items():\n", - " if key.startswith('s'):\n", - " print('%12s: ' % key, value)" + " if key.startswith(\"s\"):\n", + " print(\"%12s: \" % key, value)" ] }, { @@ -1568,13 +1610,15 @@ "# Set model options\n", "model = model_module.getModel()\n", "p_orig = np.array(model.getParameters())\n", - "p_orig[list(model.getParameterIds()).index('observable_x1withsigma_sigma')] = 0.1 # Change default parameter\n", + "p_orig[\n", + " list(model.getParameterIds()).index(\"observable_x1withsigma_sigma\")\n", + "] = 0.1 # Change default parameter\n", "model.setParameters(p_orig)\n", "model.setParameterScale(amici.ParameterScaling.none)\n", "model.setTimepoints(np.linspace(0, 10, 21))\n", "\n", "solver = model.getSolver()\n", - "solver.setMaxSteps(10**4) # Set maximum number of steps for the solver\n", + "solver.setMaxSteps(10**4) # Set maximum number of steps for the solver\n", "\n", "# simulate time-course to get artificial data\n", "rdata = amici.runAmiciSimulation(model, solver)\n", @@ -1582,18 +1626,22 @@ "edata.fixedParameters = model.getFixedParameters()\n", "# set sigma to 1.0 except for observable 5, so that p[7] is used instead\n", "# (if we have sigma parameterized, the corresponding ExpData entries must NaN, otherwise they will override the parameter)\n", - "edata.setObservedDataStdDev(rdata['t']*0+np.nan,\n", - " list(model.getObservableIds()).index('observable_x1withsigma'))\n", + "edata.setObservedDataStdDev(\n", + " rdata[\"t\"] * 0 + np.nan,\n", + " list(model.getObservableIds()).index(\"observable_x1withsigma\"),\n", + ")\n", "\n", "# enable sensitivities\n", - "solver.setSensitivityOrder(amici.SensitivityOrder.first) # First-order ...\n", - "solver.setSensitivityMethod(amici.SensitivityMethod.adjoint) # ... adjoint sensitivities\n", - "model.requireSensitivitiesForAllParameters() # ... w.r.t. all parameters\n", + "solver.setSensitivityOrder(amici.SensitivityOrder.first) # First-order ...\n", + "solver.setSensitivityMethod(\n", + " amici.SensitivityMethod.adjoint\n", + ") # ... adjoint sensitivities\n", + "model.requireSensitivitiesForAllParameters() # ... w.r.t. 
all parameters\n", "\n", "# compute adjoint sensitivities\n", "rdata = amici.runAmiciSimulation(model, solver, edata)\n", - "#print(rdata['sigmay'])\n", - "print('Log-likelihood: %f\\nGradient: %s' % (rdata['llh'], rdata['sllh']))\n" + "# print(rdata['sigmay'])\n", + "print(\"Log-likelihood: %f\\nGradient: %s\" % (rdata[\"llh\"], rdata[\"sllh\"]))" ] }, { @@ -1657,12 +1705,13 @@ "source": [ "from scipy.optimize import check_grad\n", "\n", - "def func(x0, symbol='llh', x0full=None, plist=[], verbose=False):\n", + "\n", + "def func(x0, symbol=\"llh\", x0full=None, plist=[], verbose=False):\n", " p = x0[:]\n", " if len(plist):\n", " p = x0full[:]\n", " p[plist] = x0\n", - " verbose and print('f: p=%s' % p)\n", + " verbose and print(\"f: p=%s\" % p)\n", "\n", " old_parameters = model.getParameters()\n", " solver.setSensitivityOrder(amici.SensitivityOrder.none)\n", @@ -1675,7 +1724,8 @@ " verbose and print(res)\n", " return res\n", "\n", - "def grad(x0, symbol='llh', x0full=None, plist=[], verbose=False):\n", + "\n", + "def grad(x0, symbol=\"llh\", x0full=None, plist=[], verbose=False):\n", " p = x0[:]\n", " if len(plist):\n", " model.setParameterList(plist)\n", @@ -1683,7 +1733,7 @@ " p[plist] = x0\n", " else:\n", " model.requireSensitivitiesForAllParameters()\n", - " verbose and print('g: p=%s' % p)\n", + " verbose and print(\"g: p=%s\" % p)\n", "\n", " old_parameters = model.getParameters()\n", " solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n", @@ -1693,45 +1743,50 @@ "\n", " model.setParameters(old_parameters)\n", "\n", - " res = rdata['s%s' % symbol]\n", + " res = rdata[\"s%s\" % symbol]\n", " if not isinstance(res, float):\n", " if len(res.shape) == 3:\n", " res = np.sum(res, axis=(0, 2))\n", " verbose and print(res)\n", " return res\n", "\n", + "\n", "epsilon = 1e-4\n", - "err_norm = check_grad(func, grad, p_orig, 'llh', epsilon=epsilon)\n", - "print('sllh: |error|_2: %f' % err_norm)\n", + "err_norm = check_grad(func, grad, p_orig, \"llh\", epsilon=epsilon)\n", + "print(\"sllh: |error|_2: %f\" % err_norm)\n", "# assert err_norm < 1e-6\n", "print()\n", "\n", "for ip in range(model.np()):\n", " plist = [ip]\n", " p = p_orig.copy()\n", - " err_norm = check_grad(func, grad, p[plist], 'llh', p, [ip], epsilon=epsilon)\n", - " print('sllh: p[%d]: |error|_2: %f' % (ip, err_norm))\n", + " err_norm = check_grad(\n", + " func, grad, p[plist], \"llh\", p, [ip], epsilon=epsilon\n", + " )\n", + " print(\"sllh: p[%d]: |error|_2: %f\" % (ip, err_norm))\n", "\n", "print()\n", "for ip in range(model.np()):\n", " plist = [ip]\n", " p = p_orig.copy()\n", - " err_norm = check_grad(func, grad, p[plist], 'y', p, [ip], epsilon=epsilon)\n", - " print('sy: p[%d]: |error|_2: %f' % (ip, err_norm))\n", + " err_norm = check_grad(func, grad, p[plist], \"y\", p, [ip], epsilon=epsilon)\n", + " print(\"sy: p[%d]: |error|_2: %f\" % (ip, err_norm))\n", "\n", "print()\n", "for ip in range(model.np()):\n", " plist = [ip]\n", " p = p_orig.copy()\n", - " err_norm = check_grad(func, grad, p[plist], 'x', p, [ip], epsilon=epsilon)\n", - " print('sx: p[%d]: |error|_2: %f' % (ip, err_norm))\n", + " err_norm = check_grad(func, grad, p[plist], \"x\", p, [ip], epsilon=epsilon)\n", + " print(\"sx: p[%d]: |error|_2: %f\" % (ip, err_norm))\n", "\n", "print()\n", "for ip in range(model.np()):\n", " plist = [ip]\n", " p = p_orig.copy()\n", - " err_norm = check_grad(func, grad, p[plist], 'sigmay', p, [ip], epsilon=epsilon)\n", - " print('ssigmay: p[%d]: |error|_2: %f' % (ip, err_norm))\n" + " err_norm = check_grad(\n", + " 
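
Aside (not part of the diff): scipy.optimize.check_grad, as used here, compares the sensitivity-based gradient against a one-sided finite-difference approximation with O(eps) truncation error, while the hand-rolled fd helper further down uses central differences with O(eps**2) error, which is why it can be trusted at tighter tolerances. A generic sketch of the central rule; central_diff and f are illustrative names, not notebook code:

import numpy as np

def central_diff(f, x, ip, eps=1e-4):
    # O(eps**2) central finite difference w.r.t. component ip of x
    x = np.asarray(x, dtype=float)
    e = np.zeros_like(x)
    e[ip] = eps
    return (f(x + e) - f(x - e)) / (2 * eps)

# d/dx1 of x0 * x1**2 at (2, 3) is 2 * 2 * 3 = 12
assert np.isclose(central_diff(lambda x: x[0] * x[1] ** 2, [2.0, 3.0], ip=1), 12.0)
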
func, grad, p[plist], \"sigmay\", p, [ip], epsilon=epsilon\n", + " )\n", + " print(\"ssigmay: p[%d]: |error|_2: %f\" % (ip, err_norm))" ] }, { @@ -1742,45 +1797,56 @@ }, "outputs": [], "source": [ - "eps=1e-4\n", - "op=model.getParameters()\n", + "eps = 1e-4\n", + "op = model.getParameters()\n", "\n", "\n", - "solver.setSensitivityMethod(amici.SensitivityMethod.forward) # forward sensitivity analysis\n", - "solver.setSensitivityOrder(amici.SensitivityOrder.first) # first-order sensitivities\n", + "solver.setSensitivityMethod(\n", + " amici.SensitivityMethod.forward\n", + ") # forward sensitivity analysis\n", + "solver.setSensitivityOrder(\n", + " amici.SensitivityOrder.first\n", + ") # first-order sensitivities\n", "model.requireSensitivitiesForAllParameters()\n", "solver.setRelativeTolerance(1e-12)\n", "rdata = amici.runAmiciSimulation(model, solver, edata)\n", "\n", - "def fd(x0, ip, eps, symbol='llh'):\n", + "\n", + "def fd(x0, ip, eps, symbol=\"llh\"):\n", " p = list(x0[:])\n", " old_parameters = model.getParameters()\n", " solver.setSensitivityOrder(amici.SensitivityOrder.none)\n", - " p[ip]+=eps\n", + " p[ip] += eps\n", " model.setParameters(p)\n", " rdata_f = amici.runAmiciSimulation(model, solver, edata)\n", - " p[ip]-=2*eps\n", + " p[ip] -= 2 * eps\n", " model.setParameters(p)\n", " rdata_b = amici.runAmiciSimulation(model, solver, edata)\n", "\n", " model.setParameters(old_parameters)\n", - " return (rdata_f[symbol]-rdata_b[symbol])/(2*eps)\n", + " return (rdata_f[symbol] - rdata_b[symbol]) / (2 * eps)\n", + "\n", "\n", "def plot_sensitivities(symbol, eps):\n", - " fig, axes = plt.subplots(4,2, figsize=(15,10))\n", + " fig, axes = plt.subplots(4, 2, figsize=(15, 10))\n", " for ip in range(4):\n", " fd_approx = fd(model.getParameters(), ip, eps, symbol=symbol)\n", "\n", - " axes[ip,0].plot(edata.getTimepoints(), rdata[f's{symbol}'][:,ip,:], 'r-')\n", - " axes[ip,0].plot(edata.getTimepoints(), fd_approx, 'k--')\n", - " axes[ip,0].set_ylabel(f'sensitivity {symbol}')\n", - " axes[ip,0].set_xlabel('time')\n", - "\n", - "\n", - " axes[ip,1].plot(edata.getTimepoints(), np.abs(rdata[f's{symbol}'][:,ip,:]-fd_approx), 'k-')\n", - " axes[ip,1].set_ylabel('difference to fd')\n", - " axes[ip,1].set_xlabel('time')\n", - " axes[ip,1].set_yscale('log')\n", + " axes[ip, 0].plot(\n", + " edata.getTimepoints(), rdata[f\"s{symbol}\"][:, ip, :], \"r-\"\n", + " )\n", + " axes[ip, 0].plot(edata.getTimepoints(), fd_approx, \"k--\")\n", + " axes[ip, 0].set_ylabel(f\"sensitivity {symbol}\")\n", + " axes[ip, 0].set_xlabel(\"time\")\n", + "\n", + " axes[ip, 1].plot(\n", + " edata.getTimepoints(),\n", + " np.abs(rdata[f\"s{symbol}\"][:, ip, :] - fd_approx),\n", + " \"k-\",\n", + " )\n", + " axes[ip, 1].set_ylabel(\"difference to fd\")\n", + " axes[ip, 1].set_xlabel(\"time\")\n", + " axes[ip, 1].set_yscale(\"log\")\n", "\n", " plt.tight_layout()\n", " plt.show()" @@ -1803,7 +1869,7 @@ } ], "source": [ - "plot_sensitivities('x', eps)" + "plot_sensitivities(\"x\", eps)" ] }, { @@ -1823,7 +1889,7 @@ } ], "source": [ - "plot_sensitivities('y', eps)" + "plot_sensitivities(\"y\", eps)" ] }, { @@ -1937,7 +2003,7 @@ ], "source": [ "# look at the States in rdata as DataFrame\n", - "amici.getSimulationStatesAsDataFrame(model, [edata], [rdata])\n" + "amici.getSimulationStatesAsDataFrame(model, [edata], [rdata])" ] } ], diff --git a/python/sdist/amici/__init__.py b/python/sdist/amici/__init__.py index 7160acb475..bcb7387fbf 100644 --- a/python/sdist/amici/__init__.py +++ b/python/sdist/amici/__init__.py @@ -66,9 +66,9 @@ 
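The notebook cells above validate AMICI gradients against finite differences. The same check can be stated compactly; a minimal sketch, assuming a compiled model module `model_module`, an `edata` object, and the solver settings from the cells above (the helper names are ours, not AMICI API):

import numpy as np
import amici

model = model_module.getModel()
solver = model.getSolver()

def llh_of(p):
    # objective value only, no sensitivities
    model.setParameters(p)
    solver.setSensitivityOrder(amici.SensitivityOrder.none)
    return amici.runAmiciSimulation(model, solver, edata)["llh"]

def gradient_error(p, eps=1e-4):
    # adjoint gradient vs. central finite differences, as in the cells above
    model.setParameters(p)
    model.requireSensitivitiesForAllParameters()
    solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)
    solver.setSensitivityOrder(amici.SensitivityOrder.first)
    sllh = amici.runAmiciSimulation(model, solver, edata)["sllh"]
    fd = np.array([
        (llh_of(p[:ip] + [p[ip] + eps] + p[ip + 1:])
         - llh_of(p[:ip] + [p[ip] - eps] + p[ip + 1:])) / (2 * eps)
        for ip in range(len(p))
    ])
    return np.linalg.norm(np.asarray(sllh) - fd)

print(gradient_error(list(model.getParameters())))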
def _imported_from_setup() -> bool: # requires the AMICI extension during its installation, but seems # unlikely... frame_path = os.path.realpath(os.path.expanduser(frame.filename)) - if frame_path == os.path.join(package_root, "setup.py") or frame_path.endswith( - f"{sep}setuptools{sep}build_meta.py" - ): + if frame_path == os.path.join( + package_root, "setup.py" + ) or frame_path.endswith(f"{sep}setuptools{sep}build_meta.py"): return True return False @@ -203,6 +203,8 @@ def _get_default_argument(func: Callable, arg: str) -> Any: import inspect signature = inspect.signature(func) - if (default := signature.parameters[arg].default) is not inspect.Parameter.empty: + if ( + default := signature.parameters[arg].default + ) is not inspect.Parameter.empty: return default raise ValueError(f"No default value for argument {arg} of {func}.") diff --git a/python/sdist/amici/__main__.py b/python/sdist/amici/__main__.py index b8fbc77c0f..165f5d9516 100644 --- a/python/sdist/amici/__main__.py +++ b/python/sdist/amici/__main__.py @@ -21,7 +21,9 @@ def print_info(): if hdf5_enabled: features.append("HDF5") - print(f"AMICI ({sys.platform}) version {__version__} ({','.join(features)})") + print( + f"AMICI ({sys.platform}) version {__version__} ({','.join(features)})" + ) if __name__ == "__main__": diff --git a/python/sdist/amici/conserved_quantities_demartino.py b/python/sdist/amici/conserved_quantities_demartino.py index c579558f71..5b04fa1479 100644 --- a/python/sdist/amici/conserved_quantities_demartino.py +++ b/python/sdist/amici/conserved_quantities_demartino.py @@ -60,7 +60,9 @@ def compute_moiety_conservation_laws( if not done: # construct interaction matrix - J, J2, fields = _fill(stoichiometric_list, engaged_species, num_species) + J, J2, fields = _fill( + stoichiometric_list, engaged_species, num_species + ) # seed random number generator if rng_seed is not False: @@ -87,7 +89,10 @@ def compute_moiety_conservation_laws( if timer == max_num_monte_carlo: done = _relax( - stoichiometric_list, conserved_moieties, num_reactions, num_species + stoichiometric_list, + conserved_moieties, + num_reactions, + num_species, ) timer = 0 _reduce(int_kernel_dim, cls_species_idxs, cls_coefficients, num_species) @@ -139,14 +144,23 @@ def log(*args, **kwargs): if not engaged_species_idxs: continue log( - f"Moiety number {i + 1} engages {len(engaged_species_idxs)} " "species:" + f"Moiety number {i + 1} engages {len(engaged_species_idxs)} " + "species:" ) - for species_idx, coefficient in zip(engaged_species_idxs, coefficients): - name = species_names[species_idx] if species_names else species_idx + for species_idx, coefficient in zip( + engaged_species_idxs, coefficients + ): + name = ( + species_names[species_idx] + if species_names + else species_idx + ) log(f"\t{name}\t{coefficient}") -def _qsort(k: int, km: int, order: MutableSequence[int], pivots: Sequence[int]) -> None: +def _qsort( + k: int, km: int, order: MutableSequence[int], pivots: Sequence[int] +) -> None: """Quicksort Recursive implementation of the quicksort algorithm @@ -230,7 +244,9 @@ def _kernel( matrix2[i].append(1) order: List[int] = list(range(num_species)) - pivots = [matrix[i][0] if len(matrix[i]) else _MAX for i in range(num_species)] + pivots = [ + matrix[i][0] if len(matrix[i]) else _MAX for i in range(num_species) + ] done = False while not done: @@ -241,7 +257,8 @@ def _kernel( if len(matrix[order[j]]) > 1: for i in range(len(matrix[order[j]])): min1 = min( - min1, abs(matrix2[order[j]][0] / matrix2[order[j]][i]) + min1, + 
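The reflowed `_get_default_argument` above reads a callable's declared default via `inspect.signature`. A self-contained sketch of the same pattern, using a toy function in place of an AMICI callable:

import inspect

def default_of(func, arg):
    # return the declared default of `arg`, if any
    param = inspect.signature(func).parameters[arg]
    if param.default is not inspect.Parameter.empty:
        return param.default
    raise ValueError(f"No default value for argument {arg} of {func}.")

def sbml2amici_like(model_name, compile=True):  # toy stand-in
    pass

assert default_of(sbml2amici_like, "compile") is True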
abs(matrix2[order[j]][0] / matrix2[order[j]][i]), ) min2 = _MAX @@ -249,7 +266,10 @@ def _kernel( for i in range(len(matrix[order[j + 1]])): min2 = min( min2, - abs(matrix2[order[j + 1]][0] / matrix2[order[j + 1]][i]), + abs( + matrix2[order[j + 1]][0] + / matrix2[order[j + 1]][i] + ), ) if min2 > min1: @@ -289,7 +309,9 @@ def _kernel( kernel_dim = 0 for i in range(num_species): - done = all(matrix[i][j] >= num_reactions for j in range(len(matrix[i]))) + done = all( + matrix[i][j] >= num_reactions for j in range(len(matrix[i])) + ) if done and len(matrix[i]): for j in range(len(matrix[i])): RSolutions[kernel_dim].append(matrix[i][j] - num_reactions) @@ -330,7 +352,8 @@ def _kernel( assert int_kernel_dim <= kernel_dim assert len(cls_species_idxs) == len(cls_coefficients), ( - "Inconsistent number of conserved quantities in coefficients and " "species" + "Inconsistent number of conserved quantities in coefficients and " + "species" ) return ( kernel_dim, @@ -343,7 +366,9 @@ def _kernel( def _fill( - stoichiometric_list: Sequence[float], matched: Sequence[int], num_species: int + stoichiometric_list: Sequence[float], + matched: Sequence[int], + num_species: int, ) -> Tuple[List[List[int]], List[List[int]], List[int]]: """Construct interaction matrix @@ -460,14 +485,18 @@ def _is_linearly_dependent( if len(matrix[order[j]]) > 1: for i in range(len(matrix[order[j]])): min1 = min( - min1, abs(matrix2[order[j]][0] / matrix2[order[j]][i]) + min1, + abs(matrix2[order[j]][0] / matrix2[order[j]][i]), ) min2 = _MAX if len(matrix[order[j + 1]]) > 1: for i in range(len(matrix[order[j + 1]])): min2 = min( min2, - abs(matrix2[order[j + 1]][0] / matrix2[order[j + 1]][i]), + abs( + matrix2[order[j + 1]][0] + / matrix2[order[j + 1]][i] + ), ) if min2 > min1: # swap @@ -549,7 +578,9 @@ def _monte_carlo( considered otherwise the algorithm retries Monte Carlo up to max_iter """ dim = len(matched) - num = [int(2 * random.uniform(0, 1)) if len(J[i]) else 0 for i in range(dim)] + num = [ + int(2 * random.uniform(0, 1)) if len(J[i]) else 0 for i in range(dim) + ] numtot = sum(num) def compute_h(): @@ -611,7 +642,12 @@ def compute_h(): # founds MCLS? need to check for linear independence if len(int_matched) and not _is_linearly_dependent( - num, int_kernel_dim, cls_species_idxs, cls_coefficients, matched, num_species + num, + int_kernel_dim, + cls_species_idxs, + cls_coefficients, + matched, + num_species, ): logger.debug("Found a moiety but it is linearly dependent... 
next.") return False, int_kernel_dim, int_matched @@ -708,14 +744,18 @@ def _relax( if len(matrix[order[j]]) > 1: for i in range(len(matrix[order[j]])): min1 = min( - min1, abs(matrix2[order[j]][0] / matrix2[order[j]][i]) + min1, + abs(matrix2[order[j]][0] / matrix2[order[j]][i]), ) min2 = _MAX if len(matrix[order[j + 1]]) > 1: for i in range(len(matrix[order[j + 1]])): min2 = min( min2, - abs(matrix2[order[j + 1]][0] / matrix2[order[j + 1]][i]), + abs( + matrix2[order[j + 1]][0] + / matrix2[order[j + 1]][i] + ), ) if min2 > min1: # swap @@ -774,7 +814,9 @@ def _relax( row_k[matrix[j][a]] -= matrix2[j][a] * matrix2[k][i] # filter matrix[k] = [ - row_idx for row_idx, row_val in enumerate(row_k) if row_val != 0 + row_idx + for row_idx, row_val in enumerate(row_k) + if row_val != 0 ] matrix2[k] = [row_val for row_val in row_k if row_val != 0] diff --git a/python/sdist/amici/custom_commands.py b/python/sdist/amici/custom_commands.py index 2e69800fc7..d54060a009 100644 --- a/python/sdist/amici/custom_commands.py +++ b/python/sdist/amici/custom_commands.py @@ -171,7 +171,8 @@ def build_extension(self, ext: CMakeExtension) -> None: build_dir = self.build_lib if self.inplace == 0 else os.getcwd() build_dir = Path(build_dir).absolute().as_posix() ext.cmake_configure_options = [ - x.replace("${build_dir}", build_dir) for x in ext.cmake_configure_options + x.replace("${build_dir}", build_dir) + for x in ext.cmake_configure_options ] super().build_extension(ext) diff --git a/python/sdist/amici/cxxcodeprinter.py b/python/sdist/amici/cxxcodeprinter.py index 3055518c5b..e6e377b331 100644 --- a/python/sdist/amici/cxxcodeprinter.py +++ b/python/sdist/amici/cxxcodeprinter.py @@ -68,7 +68,9 @@ def doprint(self, expr: sp.Expr, assign_to: Optional[str] = None) -> str: def _print_min_max(self, expr, cpp_fun: str, sympy_fun): # C++ doesn't like mixing int and double for arguments for min/max, # therefore, we just always convert to float - arg0 = sp.Float(expr.args[0]) if expr.args[0].is_number else expr.args[0] + arg0 = ( + sp.Float(expr.args[0]) if expr.args[0].is_number else expr.args[0] + ) if len(expr.args) == 1: return self._print(arg0) return "%s%s(%s, %s)" % ( @@ -108,13 +110,18 @@ def _get_sym_lines_array( C++ code as list of lines """ return [ - " " * indent_level + f"{variable}[{index}] = " f"{self.doprint(math)};" + " " * indent_level + f"{variable}[{index}] = " + f"{self.doprint(math)};" for index, math in enumerate(equations) if math not in [0, 0.0] ] def _get_sym_lines_symbols( - self, symbols: sp.Matrix, equations: sp.Matrix, variable: str, indent_level: int + self, + symbols: sp.Matrix, + equations: sp.Matrix, + variable: str, + indent_level: int, ) -> List[str]: """ Generate C++ code for where array elements are directly replaced with @@ -146,7 +153,9 @@ def format_regular_line(symbol, math, index): if self.extract_cse: # Extract common subexpressions cse_sym_prefix = "__amici_cse_" - symbol_generator = numbered_symbols(cls=sp.Symbol, prefix=cse_sym_prefix) + symbol_generator = numbered_symbols( + cls=sp.Symbol, prefix=cse_sym_prefix + ) replacements, reduced_exprs = sp.cse( equations, symbols=symbol_generator, @@ -162,7 +171,9 @@ def format_regular_line(symbol, math, index): sorted_symbols = toposort( { identifier: { - s for s in definition.free_symbols if s in expr_dict + s + for s in definition.free_symbols + if s in expr_dict } for (identifier, definition) in expr_dict.items() } @@ -178,7 +189,9 @@ def format_line(symbol: sp.Symbol): f"= {self.doprint(math)};" ) elif math not in [0, 0.0]: - 
return format_regular_line(symbol, math, symbol_to_idx[symbol]) + return format_regular_line( + symbol, math, symbol_to_idx[symbol] + ) return [ line @@ -247,7 +260,9 @@ def csc_matrix( symbol_row_vals.append(row) idx += 1 - symbol_name = f"d{rownames[row].name}" f"_d{colnames[col].name}" + symbol_name = ( + f"d{rownames[row].name}" f"_d{colnames[col].name}" + ) if identifier: symbol_name += f"_{identifier}" symbol_list.append(symbol_name) @@ -267,7 +282,13 @@ def csc_matrix( else: sparse_list = sp.Matrix(sparse_list) - return symbol_col_ptrs, symbol_row_vals, sparse_list, symbol_list, sparse_matrix + return ( + symbol_col_ptrs, + symbol_row_vals, + sparse_list, + symbol_list, + sparse_matrix, + ) @staticmethod def print_bool(expr) -> str: diff --git a/python/sdist/amici/de_export.py b/python/sdist/amici/de_export.py index de1b96a1c6..b1fa02c421 100644 --- a/python/sdist/amici/de_export.py +++ b/python/sdist/amici/de_export.py @@ -27,12 +27,12 @@ Callable, Dict, List, + Literal, Optional, Sequence, Set, Tuple, Union, - Literal ) import numpy as np @@ -70,9 +70,13 @@ # Template for model simulation main.cpp file CXX_MAIN_TEMPLATE_FILE = os.path.join(amiciSrcPath, "main.template.cpp") # Template for model/swig/CMakeLists.txt -SWIG_CMAKE_TEMPLATE_FILE = os.path.join(amiciSwigPath, "CMakeLists_model.cmake") +SWIG_CMAKE_TEMPLATE_FILE = os.path.join( + amiciSwigPath, "CMakeLists_model.cmake" +) # Template for model/CMakeLists.txt -MODEL_CMAKE_TEMPLATE_FILE = os.path.join(amiciSrcPath, "CMakeLists.template.cmake") +MODEL_CMAKE_TEMPLATE_FILE = os.path.join( + amiciSrcPath, "CMakeLists.template.cmake" +) IDENTIFIER_PATTERN = re.compile(r"^[a-zA-Z_]\w*$") DERIVATIVE_PATTERN = re.compile(r"^d(x_rdata|xdot|\w+?)d(\w+?)(?:_explicit)?$") @@ -285,7 +289,8 @@ def arguments(self, ode: bool = True) -> str: " const realtype *k, const int ip", ), "sigmaz": _FunctionInfo( - "realtype *sigmaz, const realtype t, const realtype *p, " "const realtype *k", + "realtype *sigmaz, const realtype t, const realtype *p, " + "const realtype *k", ), "sroot": _FunctionInfo( "realtype *stau, const realtype t, const realtype *x, " @@ -326,7 +331,8 @@ def arguments(self, ode: bool = True) -> str: assume_pow_positivity=True, ), "x0": _FunctionInfo( - "realtype *x0, const realtype t, const realtype *p, " "const realtype *k" + "realtype *x0, const realtype t, const realtype *p, " + "const realtype *k" ), "x0_fixedParameters": _FunctionInfo( "realtype *x0_fixedParameters, const realtype t, " @@ -938,7 +944,9 @@ def states(self) -> List[State]: @log_execution_time("importing SbmlImporter", logger) def import_from_sbml_importer( - self, si: "sbml_import.SbmlImporter", compute_cls: Optional[bool] = True + self, + si: "sbml_import.SbmlImporter", + compute_cls: Optional[bool] = True, ) -> None: """ Imports a model specification from a @@ -1013,15 +1021,21 @@ def transform_dxdt_to_concentration(species_id, dxdt): # we need to flatten out assignments in the compartment in # order to ensure that we catch all species dependencies - v = smart_subs_dict(v, si.symbols[SymbolId.EXPRESSION], "value") + v = smart_subs_dict( + v, si.symbols[SymbolId.EXPRESSION], "value" + ) dv_dt = v.diff(amici_time_symbol) # we may end up with a time derivative of the compartment # volume due to parameter rate rules comp_rate_vars = [ - p for p in v.free_symbols if p in si.symbols[SymbolId.SPECIES] + p + for p in v.free_symbols + if p in si.symbols[SymbolId.SPECIES] ] for var in comp_rate_vars: - dv_dt += v.diff(var) * si.symbols[SymbolId.SPECIES][var]["dt"] + 
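For context on the common-subexpression changes above: the `__amici_cse_` temporaries come from sympy's `cse` driven by a `numbered_symbols` generator. A standalone sketch:

import sympy as sp
from sympy import numbered_symbols

x, y = sp.symbols("x y")
exprs = [sp.sin(x + y) ** 2, sp.cos(sp.sin(x + y) ** 2)]

# extract repeated subexpressions as __amici_cse_<n> temporaries
gen = numbered_symbols(cls=sp.Symbol, prefix="__amici_cse_")
replacements, reduced = sp.cse(exprs, symbols=gen, order="none")

for sym, definition in replacements:
    print(sym, "=", definition)  # prints the extracted temporaries
print(reduced)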
dv_dt += ( + v.diff(var) * si.symbols[SymbolId.SPECIES][var]["dt"] + ) dv_dx = v.diff(species_id) xdot = (dxdt - dv_dt * species_id) / (dv_dx * species_id + v) return xdot @@ -1040,7 +1054,9 @@ def transform_dxdt_to_concentration(species_id, dxdt): return dxdt / v # create dynamics without respecting conservation laws first - dxdt = smart_multiply(si.stoichiometric_matrix, MutableDenseMatrix(fluxes)) + dxdt = smart_multiply( + si.stoichiometric_matrix, MutableDenseMatrix(fluxes) + ) for ix, ((species_id, species), formula) in enumerate( zip(symbols[SymbolId.SPECIES].items(), dxdt) ): @@ -1050,7 +1066,9 @@ def transform_dxdt_to_concentration(species_id, dxdt): if species["amount"]: species["dt"] = formula else: - species["dt"] = transform_dxdt_to_concentration(species_id, formula) + species["dt"] = transform_dxdt_to_concentration( + species_id, formula + ) # create all basic components of the DE model and add them. for symbol_name in symbols: @@ -1098,11 +1116,14 @@ def transform_dxdt_to_concentration(species_id, dxdt): # fill in 'self._sym' based on prototypes and components in ode_model self.generate_basic_variables() self._has_quadratic_nllh = all( - llh["dist"] in ["normal", "lin-normal", "log-normal", "log10-normal"] + llh["dist"] + in ["normal", "lin-normal", "log-normal", "log10-normal"] for llh in si.symbols[SymbolId.LLHY].values() ) - self._process_sbml_rate_of(symbols) # substitute SBML-rateOf constructs + self._process_sbml_rate_of( + symbols + ) # substitute SBML-rateOf constructs def _process_sbml_rate_of(self, symbols) -> None: """Substitute any SBML-rateOf constructs in the model equations""" @@ -1142,7 +1163,10 @@ def get_rate(symbol: sp.Symbol): for i_state in range(len(self.eq("x0"))): if rate_ofs := self._eqs["x0"][i_state].find(rate_of_func): self._eqs["x0"][i_state] = self._eqs["x0"][i_state].subs( - {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs} + { + rate_of: get_rate(rate_of.args[0]) + for rate_of in rate_ofs + } ) for component in chain( @@ -1169,7 +1193,10 @@ def get_rate(symbol: sp.Symbol): component.set_val( component.get_val().subs( - {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs} + { + rate_of: get_rate(rate_of.args[0]) + for rate_of in rate_ofs + } ) ) @@ -1265,14 +1292,21 @@ def add_conservation_law( )[0] except StopIteration: raise ValueError( - f"Specified state {state} was not found in the " f"model states." + f"Specified state {state} was not found in the " + f"model states." ) state_id = self._differential_states[ix].get_id() # \sum_{i≠j}(a_i * x_i)/a_j target_expression = ( - sp.Add(*(c_i * x_i for x_i, c_i in coefficients.items() if x_i != state)) + sp.Add( + *( + c_i * x_i + for x_i, c_i in coefficients.items() + if x_i != state + ) + ) / coefficients[state] ) @@ -1469,7 +1503,9 @@ def sparseeq(self, name) -> sp.Matrix: self._generate_sparse_symbol(name) return self._sparseeqs[name] - def colptrs(self, name: str) -> Union[List[sp.Number], List[List[sp.Number]]]: + def colptrs( + self, name: str + ) -> Union[List[sp.Number], List[List[sp.Number]]]: """ Returns (and constructs if necessary) the column pointers for a sparsified symbolic variable. 
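A note on `transform_dxdt_to_concentration` above: for a species measured as a concentration x in a compartment of volume v(t, x), with amount a = x v and `dxdt` holding the stoichiometric rate da/dt, the returned expression is just the product plus chain rule:

\frac{da}{dt} \;=\; \dot{x}\,v + x\left(\partial_t v + \partial_x v\,\dot{x}\right)
\quad\Longrightarrow\quad
\dot{x} \;=\; \frac{\frac{da}{dt} - x\,\partial_t v}{v + x\,\partial_x v},

which is exactly `(dxdt - dv_dt * species_id) / (dv_dx * species_id + v)`; here `dv_dt` also collects volume changes driven by rate-rule variables, as handled in the loop over `comp_rate_vars`.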
@@ -1486,7 +1522,9 @@ def colptrs(self, name: str) -> Union[List[sp.Number], List[List[sp.Number]]]: self._generate_sparse_symbol(name) return self._colptrs[name] - def rowvals(self, name: str) -> Union[List[sp.Number], List[List[sp.Number]]]: + def rowvals( + self, name: str + ) -> Union[List[sp.Number], List[List[sp.Number]]]: """ Returns (and constructs if necessary) the row values for a sparsified symbolic variable. @@ -1554,7 +1592,9 @@ def _generate_symbol(self, name: str) -> None: """ if name in self._variable_prototype: components = self._variable_prototype[name]() - self._syms[name] = sp.Matrix([comp.get_id() for comp in components]) + self._syms[name] = sp.Matrix( + [comp.get_id() for comp in components] + ) if name == "y": self._syms["my"] = sp.Matrix( [comp.get_measurement_symbol() for comp in components] @@ -1738,7 +1778,9 @@ def get_appearance_counts(self, idxs: List[int]) -> List[int]: return [ free_symbols_dt.count(str(self._differential_states[idx].get_id())) - + free_symbols_expr.count(str(self._differential_states[idx].get_id())) + + free_symbols_expr.count( + str(self._differential_states[idx].get_id()) + ) for idx in idxs ] @@ -1823,7 +1865,9 @@ def _compute_equation(self, name: str) -> None: time_symbol = sp.Matrix([amici_time_symbol]) if name in self._equation_prototype: - self._equation_from_components(name, self._equation_prototype[name]()) + self._equation_from_components( + name, self._equation_prototype[name]() + ) elif name in self._total_derivative_prototypes: args = self._total_derivative_prototypes[name] @@ -1913,7 +1957,9 @@ def _compute_equation(self, name: str) -> None: if any(sym in eq.free_symbols for sym in k) ] eq = self.eq("x0") - self._eqs[name] = sp.Matrix([eq[ix] for ix in self._x0_fixedParameters_idx]) + self._eqs[name] = sp.Matrix( + [eq[ix] for ix in self._x0_fixedParameters_idx] + ) elif name == "dtotal_cldx_rdata": x_rdata = self.sym("x_rdata") @@ -1926,7 +1972,9 @@ def _compute_equation(self, name: str) -> None: elif name == "dtcldx": # this is always zero - self._eqs[name] = sp.zeros(self.num_cons_law(), self.num_states_solver()) + self._eqs[name] = sp.zeros( + self.num_cons_law(), self.num_states_solver() + ) elif name == "dtcldp": # force symbols @@ -1951,15 +1999,21 @@ def _compute_equation(self, name: str) -> None: elif name == "dx_rdatadp": if self.num_cons_law(): - self._eqs[name] = smart_jacobian(self.eq("x_rdata"), self.sym("p")) + self._eqs[name] = smart_jacobian( + self.eq("x_rdata"), self.sym("p") + ) else: # so far, dx_rdatadp is only required for sx_rdata # in case of no conservation laws, C++ code will directly use # sx, we don't need this - self._eqs[name] = sp.zeros(self.num_states_rdata(), self.num_par()) + self._eqs[name] = sp.zeros( + self.num_states_rdata(), self.num_par() + ) elif name == "dx_rdatadtcl": - self._eqs[name] = smart_jacobian(self.eq("x_rdata"), self.sym("tcl")) + self._eqs[name] = smart_jacobian( + self.eq("x_rdata"), self.sym("tcl") + ) elif name == "dxdotdx_explicit": # force symbols @@ -2022,7 +2076,9 @@ def _compute_equation(self, name: str) -> None: self._eqs[name] = event_eqs elif name == "z": - event_observables = [sp.zeros(self.num_eventobs(), 1) for _ in self._events] + event_observables = [ + sp.zeros(self.num_eventobs(), 1) for _ in self._events + ] event_ids = [e.get_id() for e in self._events] # TODO: get rid of this stupid 1-based indexing as soon as we can # the matlab interface @@ -2030,7 +2086,9 @@ def _compute_equation(self, name: str) -> None: event_ids.index(event_obs.get_event()) + 1 
for event_obs in self._event_observables ] - for (iz, ie), event_obs in zip(enumerate(z2event), self._event_observables): + for (iz, ie), event_obs in zip( + enumerate(z2event), self._event_observables + ): event_observables[ie - 1][iz] = event_obs.get_val() self._eqs[name] = event_observables @@ -2048,7 +2106,10 @@ def _compute_equation(self, name: str) -> None: ] if name == "dzdx": for ie in range(self.num_events()): - dtaudx = -self.eq("drootdx")[ie, :] / self.eq("drootdt_total")[ie] + dtaudx = ( + -self.eq("drootdx")[ie, :] + / self.eq("drootdt_total")[ie] + ) for iz in range(self.num_eventobs()): if ie != self._z2event[iz] - 1: continue @@ -2119,7 +2180,9 @@ def _compute_equation(self, name: str) -> None: ) # finish chain rule for the state variables - tmp_eq += smart_multiply(self.eq("ddeltaxdx")[ie], tmp_dxdp) + tmp_eq += smart_multiply( + self.eq("ddeltaxdx")[ie], tmp_dxdp + ) event_eqs.append(tmp_eq) @@ -2138,7 +2201,9 @@ def _compute_equation(self, name: str) -> None: # that we need to reverse the order here for cl in reversed(self._conservation_laws) ] - ).col_join(smart_jacobian(self.eq("w")[self.num_cons_law() :, :], x)) + ).col_join( + smart_jacobian(self.eq("w")[self.num_cons_law() :, :], x) + ) elif match_deriv: self._derivative(match_deriv[1], match_deriv[2], name) @@ -2212,7 +2277,10 @@ def _derivative(self, eq: str, var: str, name: str = None) -> None: and cv not in self._lock_total_derivative and var != cv and min(self.sym(cv).shape) - and ((eq, var) not in ignore_chainrule or ignore_chainrule[(eq, var)] != cv) + and ( + (eq, var) not in ignore_chainrule + or ignore_chainrule[(eq, var)] != cv + ) ] if len(chainvars): self._lock_total_derivative += chainvars @@ -2315,9 +2383,14 @@ def _total_derivative( dxdz = self.sym_or_eq(name, dxdz_name) # Save time for large models if one multiplicand is zero, # which is not checked for by sympy - if not smart_is_zero_matrix(dydx) and not smart_is_zero_matrix(dxdz): + if not smart_is_zero_matrix(dydx) and not smart_is_zero_matrix( + dxdz + ): dydx_times_dxdz = smart_multiply(dydx, dxdz) - if dxdz.shape[1] == 1 and self._eqs[name].shape[1] != dxdz.shape[1]: + if ( + dxdz.shape[1] == 1 + and self._eqs[name].shape[1] != dxdz.shape[1] + ): for iz in range(self._eqs[name].shape[1]): self._eqs[name][:, iz] += dydx_times_dxdz else: @@ -2343,7 +2416,9 @@ def sym_or_eq(self, name: str, varname: str) -> sp.Matrix: # within a column may differ from the initialization of symbols here, # so those are not safe to use. Not removing them from signature as # this would break backwards compatibility. 
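The `dzdx` branch above computes `dtaudx = -drootdx / drootdt_total`. This is implicit differentiation of the event condition: if the trigger g(x(t), t) crosses zero at time tau, then differentiating g = 0 with respect to the state gives (a sketch, in the code's naming):

0 = \frac{\partial g}{\partial x} + \frac{\mathrm{d}g}{\mathrm{d}t}\,\frac{\partial \tau}{\partial x}
\quad\Longrightarrow\quad
\frac{\partial \tau}{\partial x} = -\frac{\partial g / \partial x}{\mathrm{d}g / \mathrm{d}t},

with dg/dt = `drootdt_total`, the total time derivative of the trigger.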
- if var_in_function_signature(name, varname, self.is_ode()) and varname not in [ + if var_in_function_signature( + name, varname, self.is_ode() + ) and varname not in [ "dwdx", "dwdp", ]: @@ -2467,7 +2542,8 @@ def state_has_fixed_parameter_initial_condition(self, ix: int) -> bool: if not isinstance(ic, sp.Basic): return False return any( - fp in (c.get_id() for c in self._constants) for fp in ic.free_symbols + fp in (c.get_id() for c in self._constants) + for fp in ic.free_symbols ) def state_has_conservation_law(self, ix: int) -> bool: @@ -2785,7 +2861,9 @@ def __init__( # include/amici/model.h for details) self.model: DEModel = de_model self.model._code_printer.known_functions.update( - splines.spline_user_functions(self.model.splines, self._get_index("p")) + splines.spline_user_functions( + self.model.splines, self._get_index("p") + ) ) # To only generate a subset of functions, apply subselection here @@ -2801,7 +2879,9 @@ def generate_model_code(self) -> None: Generates the native C++ code for the loaded model and a Matlab script that can be run to compile a mex file from the C++ code """ - with _monkeypatched(sp.Pow, "_eval_derivative", _custom_pow_eval_derivative): + with _monkeypatched( + sp.Pow, "_eval_derivative", _custom_pow_eval_derivative + ): self._prepare_model_folder() self._generate_c_code() self._generate_m_code() @@ -2863,7 +2943,9 @@ def _generate_c_code(self) -> None: self._write_swig_files() self._write_module_setup() - shutil.copy(CXX_MAIN_TEMPLATE_FILE, os.path.join(self.model_path, "main.cpp")) + shutil.copy( + CXX_MAIN_TEMPLATE_FILE, os.path.join(self.model_path, "main.cpp") + ) def _compile_c_code( self, @@ -2944,8 +3026,10 @@ def _generate_m_code(self) -> None: lines = [ "% This compile script was automatically created from" " Python SBML import.", - "% If mex compiler is set up within MATLAB, it can be run" " from MATLAB ", - "% in order to compile a mex-file from the Python" " generated C++ files.", + "% If mex compiler is set up within MATLAB, it can be run" + " from MATLAB ", + "% in order to compile a mex-file from the Python" + " generated C++ files.", "", f"modelName = '{self.model_name}';", "amimodel.compileAndLinkModel(modelName, '', [], [], [], []);", @@ -2977,7 +3061,10 @@ def _get_index(self, name: str) -> Dict[sp.Symbol, int]: else: raise ValueError(f"Unknown symbolic array: {name}") - return {strip_pysb(symbol).name: index for index, symbol in enumerate(symbols)} + return { + strip_pysb(symbol).name: index + for index, symbol in enumerate(symbols) + } def _write_index_files(self, name: str) -> None: """ @@ -3032,7 +3119,8 @@ def _write_function_file(self, function: str) -> None: if function in sparse_functions: equations = self.model.sparseeq(function) elif ( - not self.allow_reinit_fixpar_initcond and function == "sx0_fixedParameters" + not self.allow_reinit_fixpar_initcond + and function == "sx0_fixedParameters" ): # Not required. Will create empty function body. 
equations = sp.Matrix() @@ -3059,17 +3147,22 @@ def _write_function_file(self, function: str) -> None: lines = [] # function header - lines.extend([ - '#include "amici/symbolic_functions.h"', - '#include "amici/defines.h"', - '#include "sundials/sundials_types.h"', - "", - "#include ", - "#include ", - "", - ]) + lines.extend( + [ + '#include "amici/symbolic_functions.h"', + '#include "amici/defines.h"', + '#include "sundials/sundials_types.h"', + "", + "#include ", + "#include ", + "", + ] + ) if function == "create_splines": - lines += ['#include "amici/splinefunctions.h"', "#include "] + lines += [ + '#include "amici/splinefunctions.h"', + "#include ", + ] func_info = self.functions[function] @@ -3093,14 +3186,21 @@ def _write_function_file(self, function: str) -> None: if iszero and not ( (sym == "y" and "Jy" in function) - or (sym == "w" and "xdot" in function and len(self.model.sym(sym))) + or ( + sym == "w" + and "xdot" in function + and len(self.model.sym(sym)) + ) ): continue lines.append(f'#include "{sym}.h"') # include return symbols - if function in self.model.sym_names() and function not in non_unique_id_symbols: + if ( + function in self.model.sym_names() + and function not in non_unique_id_symbols + ): lines.append(f'#include "{function}.h"') lines.extend( @@ -3119,7 +3219,10 @@ def _write_function_file(self, function: str) -> None: body = [ # execute this twice to catch cases where the ending '(' would # be the starting (^|\W) for the following match - pow_rx.sub(r"\1amici::pos_pow(", pow_rx.sub(r"\1amici::pos_pow(", line)) + pow_rx.sub( + r"\1amici::pos_pow(", + pow_rx.sub(r"\1amici::pos_pow(", line), + ) for line in body ] @@ -3148,7 +3251,7 @@ def _write_function_file(self, function: str) -> None: fileout.write("\n".join(lines)) def _generate_function_index( - self, function: str, indextype: Literal["colptrs", "rowvals"] + self, function: str, indextype: Literal["colptrs", "rowvals"] ) -> List[str]: """ Generate equations and C++ code for the function ``function``. @@ -3249,7 +3352,9 @@ def _generate_function_index( return lines - def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]: + def _get_function_body( + self, function: str, equations: sp.Matrix + ) -> List[str]: """ Generate C++ code for body of function ``function``. 
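The `pow_rx` substitution above is applied twice because `re.sub` finds non-overlapping matches: for nested calls, the '(' that ends one replacement is also the `\W` that must precede the next match. A standalone sketch (the actual `pow_rx` pattern is defined elsewhere in de_export.py; the one below is an assumption for illustration):

import re

# hypothetical stand-in for pow_rx
pow_rx = re.compile(r"(^|\W)std::pow\(")

line = "std::pow(std::pow(x, 2), 3)"

once = pow_rx.sub(r"\1amici::pos_pow(", line)
twice = pow_rx.sub(r"\1amici::pos_pow(", once)

print(once)   # inner std::pow( is missed on the first pass
print(twice)  # amici::pos_pow(amici::pos_pow(x, 2), 3)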
@@ -3288,7 +3393,9 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]: + str(len(self.model._x0_fixedParameters_idx)) + "> _x0_fixedParameters_idxs = {", " " - + ", ".join(str(x) for x in self.model._x0_fixedParameters_idx), + + ", ".join( + str(x) for x in self.model._x0_fixedParameters_idx + ), " };", "", # Set all parameters that are to be reset to 0, so that the @@ -3325,7 +3432,9 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]: lines.extend(get_switch_statement("ip", cases, 1)) elif function == "x0_fixedParameters": - for index, formula in zip(self.model._x0_fixedParameters_idx, equations): + for index, formula in zip( + self.model._x0_fixedParameters_idx, equations + ): lines.append( f" if(std::find(reinitialization_state_idxs.cbegin(), " f"reinitialization_state_idxs.cend(), {index}) != " @@ -3359,7 +3468,10 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]: outer_cases[ie] = copy.copy(inner_lines) lines.extend(get_switch_statement("ie", outer_cases, 1)) - elif function in sensi_functions and equations.shape[1] == self.model.num_par(): + elif ( + function in sensi_functions + and equations.shape[1] == self.model.num_par() + ): cases = { ipar: self.model._code_printer._get_sym_lines_array( equations[:, ipar], function, 0 @@ -3392,7 +3504,8 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]: lines.extend(get_switch_statement(iterator, cases, 1)) elif ( - function in self.model.sym_names() and function not in non_unique_id_symbols + function in self.model.sym_names() + and function not in non_unique_id_symbols ): if function in sparse_functions: symbols = list(map(sp.Symbol, self.model.sparsesym(function))) @@ -3419,19 +3532,21 @@ def _get_create_splines_body(self): body = ["return {"] for ispl, spline in enumerate(self.model.splines): if isinstance(spline.nodes, splines.UniformGrid): - nodes = f"{ind8}{{{spline.nodes.start}, {spline.nodes.stop}}}, " + nodes = ( + f"{ind8}{{{spline.nodes.start}, {spline.nodes.stop}}}, " + ) else: nodes = f"{ind8}{{{', '.join(map(str, spline.nodes))}}}, " # vector with the node values - values = f"{ind8}{{{', '.join(map(str, spline.values_at_nodes))}}}, " + values = ( + f"{ind8}{{{', '.join(map(str, spline.values_at_nodes))}}}, " + ) # vector with the slopes if spline.derivatives_by_fd: slopes = f"{ind8}{{}}," else: - slopes = ( - f"{ind8}{{{', '.join(map(str, spline.derivatives_at_nodes))}}}," - ) + slopes = f"{ind8}{{{', '.join(map(str, spline.derivatives_at_nodes))}}}," body.extend( [ @@ -3454,7 +3569,8 @@ def _get_create_splines_body(self): body.append(ind8 + bc_to_cpp[bc]) except KeyError: raise ValueError( - f"Unknown boundary condition '{bc}' " "found in spline object" + f"Unknown boundary condition '{bc}' " + "found in spline object" ) extrapolate_to_cpp = { None: "SplineExtrapolation::noExtrapolation, ", @@ -3468,12 +3584,15 @@ def _get_create_splines_body(self): body.append(ind8 + extrapolate_to_cpp[extr]) except KeyError: raise ValueError( - f"Unknown extrapolation '{extr}' " "found in spline object" + f"Unknown extrapolation '{extr}' " + "found in spline object" ) line = ind8 line += "true, " if spline.derivatives_by_fd else "false, " line += ( - "true, " if isinstance(spline.nodes, splines.UniformGrid) else "false, " + "true, " + if isinstance(spline.nodes, splines.UniformGrid) + else "false, " ) line += "true" if spline.logarithmic_parametrization else "false" body.append(line) @@ -3552,10 +3671,12 @@ def 
_write_model_header_cpp(self) -> None: "NK": self.model.num_const(), "O2MODE": "amici::SecondOrderMode::none", # using code printer ensures proper handling of nan/inf - "PARAMETERS": self.model._code_printer.doprint(self.model.val("p"))[1:-1], - "FIXED_PARAMETERS": self.model._code_printer.doprint(self.model.val("k"))[ - 1:-1 - ], + "PARAMETERS": self.model._code_printer.doprint( + self.model.val("p") + )[1:-1], + "FIXED_PARAMETERS": self.model._code_printer.doprint( + self.model.val("k") + )[1:-1], "PARAMETER_NAMES_INITIALIZER_LIST": self._get_symbol_name_initializer_list( "p" ), @@ -3570,12 +3691,16 @@ def _write_model_header_cpp(self) -> None: ), "OBSERVABLE_TRAFO_INITIALIZER_LIST": "\n".join( f"ObservableScaling::{trafo.value}, // y[{idx}]" - for idx, trafo in enumerate(self.model.get_observable_transformations()) + for idx, trafo in enumerate( + self.model.get_observable_transformations() + ) ), "EXPRESSION_NAMES_INITIALIZER_LIST": self._get_symbol_name_initializer_list( "w" ), - "PARAMETER_IDS_INITIALIZER_LIST": self._get_symbol_id_initializer_list("p"), + "PARAMETER_IDS_INITIALIZER_LIST": self._get_symbol_id_initializer_list( + "p" + ), "STATE_IDS_INITIALIZER_LIST": self._get_symbol_id_initializer_list( "x_rdata" ), @@ -3656,16 +3781,22 @@ def _write_model_header_cpp(self) -> None: indexfield, nobody=True, ) - tpl_data[f"{func_name.upper()}_{indexfield.upper()}_DEF"] = "" + tpl_data[ + f"{func_name.upper()}_{indexfield.upper()}_DEF" + ] = "" tpl_data[ f"{func_name.upper()}_{indexfield.upper()}_IMPL" ] = impl continue - tpl_data[f"{func_name.upper()}_DEF"] = get_function_extern_declaration( + tpl_data[ + f"{func_name.upper()}_DEF" + ] = get_function_extern_declaration( func_name, self.model_name, self.model.is_ode() ) - tpl_data[f"{func_name.upper()}_IMPL"] = get_model_override_implementation( + tpl_data[ + f"{func_name.upper()}_IMPL" + ] = get_model_override_implementation( func_name, self.model_name, self.model.is_ode() ) if func_name in sparse_functions: @@ -3896,7 +4027,9 @@ def get_function_extern_declaration(fun: str, name: str, ode: bool) -> str: return f"extern {f.return_type} {fun}_{name}({f.arguments(ode)});" -def get_sunindex_extern_declaration(fun: str, name: str, indextype: str) -> str: +def get_sunindex_extern_declaration( + fun: str, name: str, indextype: str +) -> str: """ Constructs the function declaration for an index function of a given function @@ -4095,7 +4228,8 @@ def _custom_pow_eval_derivative(self, s): return part1 + part2 return part1 + sp.Piecewise( - (self.base, sp.And(sp.Eq(self.base, 0), sp.Eq(dbase, 0))), (part2, True) + (self.base, sp.And(sp.Eq(self.base, 0), sp.Eq(dbase, 0))), + (part2, True), ) diff --git a/python/sdist/amici/de_model.py b/python/sdist/amici/de_model.py index c5363511e7..77d9013ad2 100644 --- a/python/sdist/amici/de_model.py +++ b/python/sdist/amici/de_model.py @@ -67,7 +67,8 @@ def __init__( hasattr(identifier, "name") and identifier.name in RESERVED_SYMBOLS ): raise ValueError( - f'Cannot add model quantity with name "{name}", ' f"please rename." + f'Cannot add model quantity with name "{name}", ' + f"please rename." ) self._identifier: sp.Symbol = identifier @@ -301,7 +302,9 @@ class DifferentialState(State): """ - def __init__(self, identifier: sp.Symbol, name: str, init: sp.Expr, dt: sp.Expr): + def __init__( + self, identifier: sp.Symbol, name: str, init: sp.Expr, dt: sp.Expr + ): """ Create a new State instance. 
Extends :meth:`ModelQuantity.__init__` by ``dt`` @@ -335,7 +338,8 @@ def set_conservation_law(self, law: ConservationLaw) -> None: """ if not isinstance(law, ConservationLaw): raise TypeError( - f"conservation law must have type ConservationLaw" f", was {type(law)}" + f"conservation law must have type ConservationLaw" + f", was {type(law)}" ) self._conservation_law = law @@ -425,13 +429,17 @@ def __init__( def get_measurement_symbol(self) -> sp.Symbol: if self._measurement_symbol is None: - self._measurement_symbol = generate_measurement_symbol(self.get_id()) + self._measurement_symbol = generate_measurement_symbol( + self.get_id() + ) return self._measurement_symbol def get_regularization_symbol(self) -> sp.Symbol: if self._regularization_symbol is None: - self._regularization_symbol = generate_regularization_symbol(self.get_id()) + self._regularization_symbol = generate_regularization_symbol( + self.get_id() + ) return self._regularization_symbol @@ -556,7 +564,9 @@ class Parameter(ModelQuantity): sensitivities may be computed, abbreviated by ``p``. """ - def __init__(self, identifier: sp.Symbol, name: str, value: numbers.Number): + def __init__( + self, identifier: sp.Symbol, name: str, value: numbers.Number + ): """ Create a new Expression instance. @@ -579,7 +589,9 @@ class Constant(ModelQuantity): sensitivities cannot be computed, abbreviated by ``k``. """ - def __init__(self, identifier: sp.Symbol, name: str, value: numbers.Number): + def __init__( + self, identifier: sp.Symbol, name: str, value: numbers.Number + ): """ Create a new Expression instance. diff --git a/python/sdist/amici/gradient_check.py b/python/sdist/amici/gradient_check.py index ee900fe902..27e2d671d3 100644 --- a/python/sdist/amici/gradient_check.py +++ b/python/sdist/amici/gradient_check.py @@ -193,7 +193,8 @@ def check_derivatives( fields.append("x") leastsquares_applicable = ( - solver.getSensitivityMethod() == SensitivityMethod.forward and edata is not None + solver.getSensitivityMethod() == SensitivityMethod.forward + and edata is not None ) if ( @@ -208,10 +209,18 @@ def check_derivatives( fields += ["res", "y"] _check_results( - rdata, "FIM", np.dot(rdata["sres"].T, rdata["sres"]), atol=1e-8, rtol=1e-4 + rdata, + "FIM", + np.dot(rdata["sres"].T, rdata["sres"]), + atol=1e-8, + rtol=1e-4, ) _check_results( - rdata, "sllh", -np.dot(rdata["res"].T, rdata["sres"]), atol=1e-8, rtol=1e-4 + rdata, + "sllh", + -np.dot(rdata["res"].T, rdata["sres"]), + atol=1e-8, + rtol=1e-4, ) if edata is not None: @@ -221,7 +230,15 @@ def check_derivatives( if pval == 0.0 and skip_zero_pars: continue check_finite_difference( - p, model, solver, edata, ip, fields, atol=atol, rtol=rtol, epsilon=epsilon + p, + model, + solver, + edata, + ip, + fields, + atol=atol, + rtol=rtol, + epsilon=epsilon, ) @@ -317,4 +334,6 @@ def _check_results( if type(result) is float: result = np.array(result) - _check_close(result=result, expected=expected, atol=atol, rtol=rtol, field=field) + _check_close( + result=result, expected=expected, atol=atol, rtol=rtol, field=field + ) diff --git a/python/sdist/amici/import_utils.py b/python/sdist/amici/import_utils.py index 953af3dd85..77a2add60b 100644 --- a/python/sdist/amici/import_utils.py +++ b/python/sdist/amici/import_utils.py @@ -45,14 +45,18 @@ def __init__(self, data): s = "Circular dependencies exist among these items: {{{}}}".format( ", ".join( "{!r}:{!r}".format(key, value) - for key, value in sorted({str(k): v for k, v in data.items()}.items()) + for key, value in sorted( + {str(k): v for k, v in 
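On the reflowed `check_derivatives` asserts above: with residual vector r (`rdata["res"]`) and residual sensitivities S = dr/dtheta (`rdata["sres"]`), the two `_check_results` calls encode the standard least-squares identities in Gauss-Newton approximation:

\mathrm{FIM} \approx S^{\top} S, \qquad \nabla_{\theta} \log \mathcal{L} = -\, r^{\top} S.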
data.items()}.items() + ) ) ) super(CircularDependencyError, self).__init__(s) self.data = data -setattr(sys.modules["toposort"], "CircularDependencyError", CircularDependencyError) +setattr( + sys.modules["toposort"], "CircularDependencyError", CircularDependencyError +) annotation_namespace = "https://github.com/AMICI-dev/AMICI" @@ -215,7 +219,8 @@ def noise_distribution_to_cost_function( y_string = "log(2*{sigma}*{m}) + Abs(log({y}) - log({m})) / {sigma}" elif noise_distribution == "log10-laplace": y_string = ( - "log(2*{sigma}*{m}*log(10)) " "+ Abs(log({y}, 10) - log({m}, 10)) / {sigma}" + "log(2*{sigma}*{m}*log(10)) " + "+ Abs(log({y}, 10) - log({m}, 10)) / {sigma}" ) elif noise_distribution in ["binomial", "lin-binomial"]: # Binomial noise model parameterized via success probability p @@ -236,7 +241,9 @@ def noise_distribution_to_cost_function( f"- {{m}} * log({{sigma}})" ) else: - raise ValueError(f"Cost identifier {noise_distribution} not recognized.") + raise ValueError( + f"Cost identifier {noise_distribution} not recognized." + ) def nllh_y_string(str_symbol): y, m, sigma = _get_str_symbol_identifiers(str_symbol) @@ -252,7 +259,10 @@ def _get_str_symbol_identifiers(str_symbol: str) -> tuple: def smart_subs_dict( - sym: sp.Expr, subs: SymbolDef, field: Optional[str] = None, reverse: bool = True + sym: sp.Expr, + subs: SymbolDef, + field: Optional[str] = None, + reverse: bool = True, ) -> sp.Expr: """ Substitutes expressions completely flattening them out. Requires @@ -275,7 +285,8 @@ def smart_subs_dict( Substituted symbolic expression """ s = [ - (eid, expr[field] if field is not None else expr) for eid, expr in subs.items() + (eid, expr[field] if field is not None else expr) + for eid, expr in subs.items() ] if reverse: s.reverse() @@ -306,7 +317,9 @@ def smart_subs(element: sp.Expr, old: sp.Symbol, new: sp.Expr) -> sp.Expr: return element.subs(old, new) if element.has(old) else element -def toposort_symbols(symbols: SymbolDef, field: Optional[str] = None) -> SymbolDef: +def toposort_symbols( + symbols: SymbolDef, field: Optional[str] = None +) -> SymbolDef: """ Topologically sort symbol definitions according to their interdependency @@ -383,7 +396,9 @@ def _parse_special_functions(sym: sp.Expr, toplevel: bool = True) -> sp.Expr: if sym.__class__.__name__ in fun_mappings: return fun_mappings[sym.__class__.__name__](*args) - elif sym.__class__.__name__ == "piecewise" or isinstance(sym, sp.Piecewise): + elif sym.__class__.__name__ == "piecewise" or isinstance( + sym, sp.Piecewise + ): if isinstance(sym, sp.Piecewise): # this is sympy piecewise, can't be nested denested_args = args @@ -435,7 +450,9 @@ def _denest_piecewise( # piece was picked previous_was_picked = sp.false # recursively denest those first - for sub_coeff, sub_cond in grouper(_denest_piecewise(cond.args), 2, True): + for sub_coeff, sub_cond in grouper( + _denest_piecewise(cond.args), 2, True + ): # flatten the individual pieces pick_this = sp.And(sp.Not(previous_was_picked), sub_cond) if sub_coeff == sp.true: @@ -516,7 +533,9 @@ def _parse_heaviside_trigger(trigger: sp.Expr) -> sp.Expr: # or(x,y) = not(and(not(x),not(y)) if isinstance(trigger, sp.Or): - return 1 - sp.Mul(*[1 - _parse_heaviside_trigger(arg) for arg in trigger.args]) + return 1 - sp.Mul( + *[1 - _parse_heaviside_trigger(arg) for arg in trigger.args] + ) if isinstance(trigger, sp.And): return sp.Mul(*[_parse_heaviside_trigger(arg) for arg in trigger.args]) @@ -527,7 +546,9 @@ def _parse_heaviside_trigger(trigger: sp.Expr) -> sp.Expr: ) -def 
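One of the cost strings reflowed above, the `log-laplace` case, is the negative log-density of a log-Laplace distribution: for measurement m, simulation y, and scale sigma,

-\log p(m \mid y, \sigma) \;=\; \log(2\sigma m) + \frac{\lvert \log y - \log m \rvert}{\sigma},

matching `log(2*{sigma}*{m}) + Abs(log({y}) - log({m})) / {sigma}`.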
grouper(iterable: Iterable, n: int, fillvalue: Any = None) -> Iterable[Tuple[Any]]: +def grouper( + iterable: Iterable, n: int, fillvalue: Any = None +) -> Iterable[Tuple[Any]]: """ Collect data into fixed-length chunks or blocks @@ -659,7 +680,9 @@ def generate_regularization_symbol(observable_id: Union[str, sp.Symbol]): return symbol_with_assumptions(f"r{observable_id}") -def generate_flux_symbol(reaction_index: int, name: Optional[str] = None) -> sp.Symbol: +def generate_flux_symbol( + reaction_index: int, name: Optional[str] = None +) -> sp.Symbol: """ Generate identifier symbol for a reaction flux. This function will always return the same unique python object for a diff --git a/python/sdist/amici/logging.py b/python/sdist/amici/logging.py index 5f548de7a1..2648fc5b28 100644 --- a/python/sdist/amici/logging.py +++ b/python/sdist/amici/logging.py @@ -166,7 +166,8 @@ def get_logger( _setup_logger(**kwargs) elif kwargs: warnings.warn( - "AMICI logger already exists, ignoring keyword " "arguments to setup_logger" + "AMICI logger already exists, ignoring keyword " + "arguments to setup_logger" ) logger = logging.getLogger(logger_name) @@ -193,7 +194,8 @@ def decorator_timer(func): def wrapper_timer(*args, **kwargs): # append pluses to indicate recursion level recursion_level = sum( - frame.function == "wrapper_timer" and frame.filename == __file__ + frame.function == "wrapper_timer" + and frame.filename == __file__ for frame in getouterframes(currentframe(), context=0) ) diff --git a/python/sdist/amici/numpy.py b/python/sdist/amici/numpy.py index 91ca6449f6..23ebfdbbc4 100644 --- a/python/sdist/amici/numpy.py +++ b/python/sdist/amici/numpy.py @@ -220,7 +220,8 @@ def __init__(self, rdata: Union[ReturnDataPtr, ReturnData]): """ if not isinstance(rdata, (ReturnDataPtr, ReturnData)): raise TypeError( - f"Unsupported pointer {type(rdata)}, must be" f"amici.ExpDataPtr!" + f"Unsupported pointer {type(rdata)}, must be" + f"amici.ExpDataPtr!" ) self._field_dimensions = { "ts": [rdata.nt], @@ -293,7 +294,9 @@ def __getitem__( return super().__getitem__(item) - def by_id(self, entity_id: str, field: str = None, model: Model = None) -> np.array: + def by_id( + self, entity_id: str, field: str = None, model: Model = None + ) -> np.array: """ Get the value of a given field for a named entity. @@ -311,11 +314,17 @@ def by_id(self, entity_id: str, field: str = None, model: Model = None) -> np.ar if field in {"x", "x0", "x_ss", "sx", "sx0", "sx_ss"}: ids = (model and model.getStateIds()) or self._swigptr.state_ids elif field in {"w"}: - ids = (model and model.getExpressionIds()) or self._swigptr.expression_ids + ids = ( + model and model.getExpressionIds() + ) or self._swigptr.expression_ids elif field in {"y", "sy", "sigmay"}: - ids = (model and model.getObservableIds()) or self._swigptr.observable_ids + ids = ( + model and model.getObservableIds() + ) or self._swigptr.observable_ids elif field in {"sllh"}: - ids = (model and model.getParameterIds()) or self._swigptr.parameter_ids + ids = ( + model and model.getParameterIds() + ) or self._swigptr.parameter_ids else: raise NotImplementedError( f"Subsetting {field} by ID is not implemented or not possible." @@ -348,7 +357,8 @@ def __init__(self, edata: Union[ExpDataPtr, ExpData]): """ if not isinstance(edata, (ExpDataPtr, ExpData)): raise TypeError( - f"Unsupported pointer {type(edata)}, must be" f"amici.ExpDataPtr!" + f"Unsupported pointer {type(edata)}, must be" + f"amici.ExpDataPtr!" 
) self._field_dimensions = { # observables "observedData": [edata.nt(), edata.nytrue()], @@ -411,7 +421,9 @@ def _entity_type_from_id( return symbol else: if entity_id in getattr( - rdata if isinstance(rdata, amici.ReturnData) else rdata._swigptr, + rdata + if isinstance(rdata, amici.ReturnData) + else rdata._swigptr, f"{entity_type.lower()}_ids", ): return symbol diff --git a/python/sdist/amici/pandas.py b/python/sdist/amici/pandas.py index dd240242af..8a2eb5049d 100644 --- a/python/sdist/amici/pandas.py +++ b/python/sdist/amici/pandas.py @@ -107,7 +107,9 @@ def getDataObservablesAsDataFrame( _get_names_or_ids(model, "Observable", by_id=by_id) ): datadict[obs] = npdata["observedData"][i_time, i_obs] - datadict[obs + "_std"] = npdata["observedDataStdDev"][i_time, i_obs] + datadict[obs + "_std"] = npdata["observedDataStdDev"][ + i_time, i_obs + ] # add conditions _fill_conditions_dict(datadict, model, edata, by_id=by_id) @@ -396,12 +398,16 @@ def _fill_conditions_dict( datadict[par] = model.getFixedParameters()[i_par] if len(edata.fixedParametersPreequilibration): - datadict[par + "_preeq"] = edata.fixedParametersPreequilibration[i_par] + datadict[par + "_preeq"] = edata.fixedParametersPreequilibration[ + i_par + ] else: datadict[par + "_preeq"] = np.nan if len(edata.fixedParametersPresimulation): - datadict[par + "_presim"] = edata.fixedParametersPresimulation[i_par] + datadict[par + "_presim"] = edata.fixedParametersPresimulation[ + i_par + ] else: datadict[par + "_presim"] = np.nan return datadict @@ -526,7 +532,9 @@ def _get_expression_cols(model: AmiciModel, by_id: bool) -> List[str]: ) -def _get_names_or_ids(model: AmiciModel, variable: str, by_id: bool) -> List[str]: +def _get_names_or_ids( + model: AmiciModel, variable: str, by_id: bool +) -> List[str]: """ Obtains a unique list of identifiers for the specified variable. First tries model.getVariableNames and then uses model.getVariableIds. 
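The pandas helpers above turn ExpData/ReturnData objects into tidy tables keyed by observable or state names; together with `ReturnDataView.by_id` from numpy.py, this gives two ways to pull named quantities out of a simulation. A short usage sketch, assuming `model`, `solver`, and `edata` as in the notebook earlier (the observable id is a placeholder):

import amici

rdata = amici.runAmiciSimulation(model, solver, edata)

# tidy table over all states for this condition
df = amici.getSimulationStatesAsDataFrame(model, [edata], [rdata])

# single named trajectory; "observable_x1" is a hypothetical id
y1 = rdata.by_id("observable_x1", field="y", model=model)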
@@ -674,22 +682,33 @@ def constructEdataFromDataFrame( ) # fill in preequilibration parameters - if any([overwrite_preeq[key] != condition[key] for key in overwrite_preeq]): - edata.fixedParametersPreequilibration = _get_specialized_fixed_parameters( - model, condition, overwrite_preeq, by_id=by_id + if any( + [overwrite_preeq[key] != condition[key] for key in overwrite_preeq] + ): + edata.fixedParametersPreequilibration = ( + _get_specialized_fixed_parameters( + model, condition, overwrite_preeq, by_id=by_id + ) ) elif len(overwrite_preeq): - edata.fixedParametersPreequilibration = copy.deepcopy(edata.fixedParameters) + edata.fixedParametersPreequilibration = copy.deepcopy( + edata.fixedParameters + ) # fill in presimulation parameters if any( - [overwrite_presim[key] != condition[key] for key in overwrite_presim.keys()] + [ + overwrite_presim[key] != condition[key] + for key in overwrite_presim.keys() + ] ): edata.fixedParametersPresimulation = _get_specialized_fixed_parameters( model, condition, overwrite_presim, by_id=by_id ) elif len(overwrite_presim.keys()): - edata.fixedParametersPresimulation = copy.deepcopy(edata.fixedParameters) + edata.fixedParametersPresimulation = copy.deepcopy( + edata.fixedParameters + ) # fill in presimulation time if "t_presim" in condition.keys(): @@ -739,7 +758,9 @@ def getEdataFromDataFrame( # aggregate features that define a condition # fixed parameters - condition_parameters = _get_names_or_ids(model, "FixedParameter", by_id=by_id) + condition_parameters = _get_names_or_ids( + model, "FixedParameter", by_id=by_id + ) # preeq and presim parameters for par in _get_names_or_ids(model, "FixedParameter", by_id=by_id): if par + "_preeq" in df.columns: @@ -758,7 +779,9 @@ def getEdataFromDataFrame( selected = np.ones((len(df),), dtype=bool) for par_label, par in row.items(): if math.isnan(par): - selected = selected & np.isnan(df[par_label].astype(float).values) + selected = selected & np.isnan( + df[par_label].astype(float).values + ) else: selected = selected & (df[par_label] == par) edata_df = df[selected] diff --git a/python/sdist/amici/parameter_mapping.py b/python/sdist/amici/parameter_mapping.py index 9f4d3b24dd..f1cf75a150 100644 --- a/python/sdist/amici/parameter_mapping.py +++ b/python/sdist/amici/parameter_mapping.py @@ -126,7 +126,9 @@ class ParameterMapping(Sequence): List of parameter mappings for specific conditions. 
""" - def __init__(self, parameter_mappings: List[ParameterMappingForCondition] = None): + def __init__( + self, parameter_mappings: List[ParameterMappingForCondition] = None + ): super().__init__() if parameter_mappings is None: parameter_mappings = [] @@ -146,7 +148,9 @@ def __getitem__( def __len__(self): return len(self.parameter_mappings) - def append(self, parameter_mapping_for_condition: ParameterMappingForCondition): + def append( + self, parameter_mapping_for_condition: ParameterMappingForCondition + ): """Append a condition specific parameter mapping.""" self.parameter_mappings.append(parameter_mapping_for_condition) @@ -188,7 +192,8 @@ def fill_in_parameters( set(problem_parameters.keys()) - parameter_mapping.free_symbols ): warnings.warn( - "The following problem parameters were not used: " + str(unused_parameters), + "The following problem parameters were not used: " + + str(unused_parameters), RuntimeWarning, ) @@ -262,10 +267,12 @@ def _get_par(model_par, value, mapping): return value map_preeq_fix = { - key: _get_par(key, val, map_preeq_fix) for key, val in map_preeq_fix.items() + key: _get_par(key, val, map_preeq_fix) + for key, val in map_preeq_fix.items() } map_sim_fix = { - key: _get_par(key, val, map_sim_fix) for key, val in map_sim_fix.items() + key: _get_par(key, val, map_sim_fix) + for key, val in map_sim_fix.items() } map_sim_var = { key: _get_par(key, val, dict(map_sim_fix, **map_sim_var)) @@ -289,7 +296,9 @@ def _get_par(model_par, value, mapping): # variable parameters and parameter scale # parameter list from mapping dict - parameters = [map_sim_var[par_id] for par_id in amici_model.getParameterIds()] + parameters = [ + map_sim_var[par_id] for par_id in amici_model.getParameterIds() + ] # scales list from mapping dict scales = [ @@ -317,7 +326,8 @@ def _get_par(model_par, value, mapping): # fixed parameters preequilibration if map_preeq_fix: fixed_pars_preeq = [ - map_preeq_fix[par_id] for par_id in amici_model.getFixedParameterIds() + map_preeq_fix[par_id] + for par_id in amici_model.getFixedParameterIds() ] edata.fixedParametersPreequilibration = fixed_pars_preeq @@ -325,7 +335,8 @@ def _get_par(model_par, value, mapping): # fixed parameters simulation if map_sim_fix: fixed_pars_sim = [ - map_sim_fix[par_id] for par_id in amici_model.getFixedParameterIds() + map_sim_fix[par_id] + for par_id in amici_model.getFixedParameterIds() ] edata.fixedParameters = fixed_pars_sim @@ -370,11 +381,14 @@ def scale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number: if petab_scale == LOG: return np.log(value) raise ValueError( - f"Unknown parameter scale {petab_scale}. " f"Must be from {(LIN, LOG, LOG10)}" + f"Unknown parameter scale {petab_scale}. " + f"Must be from {(LIN, LOG, LOG10)}" ) -def unscale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number: +def unscale_parameter( + value: numbers.Number, petab_scale: str +) -> numbers.Number: """Bring parameter from scale to linear scale. :param value: @@ -392,7 +406,8 @@ def unscale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number if petab_scale == LOG: return np.exp(value) raise ValueError( - f"Unknown parameter scale {petab_scale}. " f"Must be from {(LIN, LOG, LOG10)}" + f"Unknown parameter scale {petab_scale}. 
" + f"Must be from {(LIN, LOG, LOG10)}" ) diff --git a/python/sdist/amici/petab_import.py b/python/sdist/amici/petab_import.py index 909bf250ae..23fe4394f0 100644 --- a/python/sdist/amici/petab_import.py +++ b/python/sdist/amici/petab_import.py @@ -306,7 +306,9 @@ def import_petab_problem( if petab_problem.mapping_df is not None: # It's partially supported. Remove at your own risk... - raise NotImplementedError("PEtab v2.0.0 mapping tables are not yet supported.") + raise NotImplementedError( + "PEtab v2.0.0 mapping tables are not yet supported." + ) model_name = model_name or petab_problem.model.model_id @@ -366,7 +368,9 @@ def import_petab_problem( model = model_module.getModel() check_model(amici_model=model, petab_problem=petab_problem) - logger.info(f"Successfully loaded model {model_name} " f"from {model_output_dir}.") + logger.info( + f"Successfully loaded model {model_name} " f"from {model_output_dir}." + ) return model @@ -383,7 +387,9 @@ def check_model( amici_ids = amici_ids_free | set(amici_model.getFixedParameterIds()) petab_ids_free = set( - petab_problem.parameter_df.loc[petab_problem.parameter_df[ESTIMATE] == 1].index + petab_problem.parameter_df.loc[ + petab_problem.parameter_df[ESTIMATE] == 1 + ].index ) amici_ids_free_required = petab_ids_free.intersection(amici_ids) @@ -429,7 +435,9 @@ def _create_model_name(folder: Union[str, Path]) -> str: return os.path.split(os.path.normpath(folder))[-1] -def _can_import_model(model_name: str, model_output_dir: Union[str, Path]) -> bool: +def _can_import_model( + model_name: str, model_output_dir: Union[str, Path] +) -> bool: """ Check whether a module of that name can already be imported. """ @@ -555,7 +563,8 @@ def import_model_sbml( if petab_problem.observable_df is None: raise NotImplementedError( - "PEtab import without observables table " "is currently not supported." + "PEtab import without observables table " + "is currently not supported." ) assert isinstance(petab_problem.model, SbmlModel) @@ -596,8 +605,10 @@ def import_model_sbml( ) sbml_model = sbml_importer.sbml - allow_n_noise_pars = not petab.lint.observable_table_has_nontrivial_noise_formula( - petab_problem.observable_df + allow_n_noise_pars = ( + not petab.lint.observable_table_has_nontrivial_noise_formula( + petab_problem.observable_df + ) ) if ( petab_problem.measurement_df is not None @@ -632,7 +643,9 @@ def import_model_sbml( # so we add any output parameters to the SBML model. # this should be changed to something more elegant # - formulas = chain((val["formula"] for val in observables.values()), sigmas.values()) + formulas = chain( + (val["formula"] for val in observables.values()), sigmas.values() + ) output_parameters = OrderedDict() for formula in formulas: # we want reproducible parameter ordering upon repeated import @@ -649,10 +662,13 @@ def import_model_sbml( ): output_parameters[sym] = None logger.debug( - "Adding output parameters to model: " f"{list(output_parameters.keys())}" + "Adding output parameters to model: " + f"{list(output_parameters.keys())}" ) output_parameter_defaults = output_parameter_defaults or {} - if extra_pars := (set(output_parameter_defaults) - set(output_parameters.keys())): + if extra_pars := ( + set(output_parameter_defaults) - set(output_parameters.keys()) + ): raise ValueError( f"Default output parameter values were given for {extra_pars}, " "but they those are not output parameters." @@ -691,7 +707,8 @@ def import_model_sbml( # Can only reset parameters after preequilibration if they are fixed. 
fixed_parameters.append(PREEQ_INDICATOR_ID) logger.debug( - "Adding preequilibration indicator " f"constant {PREEQ_INDICATOR_ID}" + "Adding preequilibration indicator " + f"constant {PREEQ_INDICATOR_ID}" ) logger.debug(f"Adding initial assignments for {initial_states.keys()}") for assignee_id in initial_states: @@ -756,7 +773,8 @@ def import_model_sbml( ) if kwargs.get( - "compile", amici._get_default_argument(sbml_importer.sbml2amici, "compile") + "compile", + amici._get_default_argument(sbml_importer.sbml2amici, "compile"), ): # check that the model extension was compiled successfully model_module = amici.import_model_module(model_name, model_output_dir) @@ -772,7 +790,9 @@ def import_model_sbml( def get_observation_model( observable_df: pd.DataFrame, -) -> Tuple[Dict[str, Dict[str, str]], Dict[str, str], Dict[str, Union[str, float]]]: +) -> Tuple[ + Dict[str, Dict[str, str]], Dict[str, str], Dict[str, Union[str, float]] +]: """ Get observables, sigmas, and noise distributions from PEtab observation table in a format suitable for @@ -804,7 +824,9 @@ def get_observation_model( # cannot handle states in sigma expressions. Therefore, where possible, # replace species occurring in error model definition by observableIds. replacements = { - sp.sympify(observable["formula"], locals=_clash): sp.Symbol(observable_id) + sp.sympify(observable["formula"], locals=_clash): sp.Symbol( + observable_id + ) for observable_id, observable in observables.items() } for observable_id, formula in sigmas.items(): @@ -816,7 +838,9 @@ def get_observation_model( return observables, noise_distrs, sigmas -def petab_noise_distributions_to_amici(observable_df: pd.DataFrame) -> Dict[str, str]: +def petab_noise_distributions_to_amici( + observable_df: pd.DataFrame, +) -> Dict[str, str]: """ Map from the petab to the amici format of noise distribution identifiers. 
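Note: the `scale_parameter`/`unscale_parameter` hunks above only reflow the code; the semantics are unchanged. As a reading aid, here is a minimal sketch of those semantics, assuming the `petab.C` constants `LIN`, `LOG`, `LOG10` carry the string values "lin", "log", "log10" (an assumption; only the constant names appear in the diff):

```python
import numpy as np

# Assumed string values of the petab.C scale constants.
LIN, LOG, LOG10 = "lin", "log", "log10"

def scale(value: float, petab_scale: str) -> float:
    """Bring a linear-scale value to the given PEtab parameter scale."""
    if petab_scale == LIN:
        return value
    if petab_scale == LOG10:
        return np.log10(value)
    if petab_scale == LOG:
        return np.log(value)
    raise ValueError(f"Unknown parameter scale {petab_scale}.")

def unscale(value: float, petab_scale: str) -> float:
    """Inverse of ``scale``: map a scaled value back to linear scale."""
    if petab_scale == LIN:
        return value
    if petab_scale == LOG10:
        return 10**value
    if petab_scale == LOG:
        return np.exp(value)
    raise ValueError(f"Unknown parameter scale {petab_scale}.")

# Round trip: unscaling inverts scaling.
assert np.isclose(unscale(scale(100.0, LOG10), LOG10), 100.0)
```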
@@ -868,7 +892,9 @@ def show_model_info(sbml_model: "libsbml.Model"): """Log some model quantities""" logger.info(f"Species: {len(sbml_model.getListOfSpecies())}") - logger.info("Global parameters: " + str(len(sbml_model.getListOfParameters()))) + logger.info( + "Global parameters: " + str(len(sbml_model.getListOfParameters())) + ) logger.info(f"Reactions: {len(sbml_model.getListOfReactions())}") @@ -930,20 +956,35 @@ def _parse_cli_args(): "-s", "--sbml", dest="sbml_file_name", help="SBML model filename" ) parser.add_argument( - "-m", "--measurements", dest="measurement_file_name", help="Measurement table" + "-m", + "--measurements", + dest="measurement_file_name", + help="Measurement table", ) parser.add_argument( - "-c", "--conditions", dest="condition_file_name", help="Conditions table" + "-c", + "--conditions", + dest="condition_file_name", + help="Conditions table", ) parser.add_argument( - "-p", "--parameters", dest="parameter_file_name", help="Parameter table" + "-p", + "--parameters", + dest="parameter_file_name", + help="Parameter table", ) parser.add_argument( - "-b", "--observables", dest="observable_file_name", help="Observable table" + "-b", + "--observables", + dest="observable_file_name", + help="Observable table", ) parser.add_argument( - "-y", "--yaml", dest="yaml_file_name", help="PEtab YAML problem filename" + "-y", + "--yaml", + dest="yaml_file_name", + help="PEtab YAML problem filename", ) parser.add_argument( @@ -956,7 +997,11 @@ def _parse_cli_args(): args = parser.parse_args() if not args.yaml_file_name and not all( - (args.sbml_file_name, args.condition_file_name, args.observable_file_name) + ( + args.sbml_file_name, + args.condition_file_name, + args.observable_file_name, + ) ): parser.error( "When not specifying a model name or YAML file, then " diff --git a/python/sdist/amici/petab_import_pysb.py b/python/sdist/amici/petab_import_pysb.py index 63c1dd9681..8036d1358d 100644 --- a/python/sdist/amici/petab_import_pysb.py +++ b/python/sdist/amici/petab_import_pysb.py @@ -23,7 +23,9 @@ logger = get_logger(__name__, logging.WARNING) -def _add_observation_model(pysb_model: pysb.Model, petab_problem: petab.Problem): +def _add_observation_model( + pysb_model: pysb.Model, petab_problem: petab.Problem +): """Extend PySB model by observation model as defined in the PEtab observables table""" @@ -65,7 +67,9 @@ def _add_observation_model(pysb_model: pysb.Model, petab_problem: petab.Problem) local_syms[sigma_id] = sigma_expr -def _add_initialization_variables(pysb_model: pysb.Model, petab_problem: petab.Problem): +def _add_initialization_variables( + pysb_model: pysb.Model, petab_problem: petab.Problem +): """Add initialization variables to the PySB model to support initial conditions specified in the PEtab condition table. @@ -92,7 +96,8 @@ def _add_initialization_variables(pysb_model: pysb.Model, petab_problem: petab.P # Can only reset parameters after preequilibration if they are fixed. 
fixed_parameters.append(PREEQ_INDICATOR_ID) logger.debug( - "Adding preequilibration indicator constant " f"{PREEQ_INDICATOR_ID}" + "Adding preequilibration indicator constant " + f"{PREEQ_INDICATOR_ID}" ) logger.debug(f"Adding initial assignments for {initial_states.keys()}") @@ -131,7 +136,9 @@ def _add_initialization_variables(pysb_model: pysb.Model, petab_problem: petab.P pysb_model.add_component(formula) for initial in pysb_model.initials: - if match_complex_pattern(initial.pattern, species_pattern, exact=True): + if match_complex_pattern( + initial.pattern, species_pattern, exact=True + ): logger.debug( "The PySB model has an initial defined for species " f"{assignee_id}, but this species also has an initial " @@ -226,9 +233,14 @@ def import_model_pysb( f"column: {x}" ) - from .petab_import import get_fixed_parameters, petab_noise_distributions_to_amici + from .petab_import import ( + get_fixed_parameters, + petab_noise_distributions_to_amici, + ) - constant_parameters = get_fixed_parameters(petab_problem) + fixed_parameters + constant_parameters = ( + get_fixed_parameters(petab_problem) + fixed_parameters + ) if petab_problem.observable_df is None: observables = None @@ -243,7 +255,9 @@ def import_model_pysb( sigmas = {obs_id: f"{obs_id}_sigma" for obs_id in observables} - noise_distrs = petab_noise_distributions_to_amici(petab_problem.observable_df) + noise_distrs = petab_noise_distributions_to_amici( + petab_problem.observable_df + ) from amici.pysb_import import pysb2amici diff --git a/python/sdist/amici/petab_objective.py b/python/sdist/amici/petab_objective.py index f518724c82..87409ee446 100644 --- a/python/sdist/amici/petab_objective.py +++ b/python/sdist/amici/petab_objective.py @@ -144,7 +144,11 @@ def simulate_petab( # number of amici simulations will be number of unique # (preequilibrationConditionId, simulationConditionId) pairs. # Can be optimized by checking for identical condition vectors. - if simulation_conditions is None and parameter_mapping is None and edatas is None: + if ( + simulation_conditions is None + and parameter_mapping is None + and edatas is None + ): simulation_conditions = ( petab_problem.get_simulation_conditions_from_measurement_df() ) @@ -262,7 +266,8 @@ def aggregate_sllh( if petab_scale and petab_problem is None: raise ValueError( - "Please provide the PEtab problem, when using " "`petab_scale=True`." + "Please provide the PEtab problem, when using " + "`petab_scale=True`." ) # Check for issues in all condition simulation results. @@ -280,7 +285,9 @@ def aggregate_sllh( for condition_parameter_mapping, edata, rdata in zip( parameter_mapping, edatas, rdatas ): - for sllh_parameter_index, condition_parameter_sllh in enumerate(rdata.sllh): + for sllh_parameter_index, condition_parameter_sllh in enumerate( + rdata.sllh + ): # Get PEtab parameter ID # Use ExpData if it provides a parameter list, else default to # Model. @@ -301,9 +308,11 @@ def aggregate_sllh( if petab_scale: # `ParameterMappingForCondition` objects provide the scale in # terms of `petab.C` constants already, not AMICI equivalents. 
- model_parameter_scale = condition_parameter_mapping.scale_map_sim_var[ - model_parameter_id - ] + model_parameter_scale = ( + condition_parameter_mapping.scale_map_sim_var[ + model_parameter_id + ] + ) petab_parameter_scale = petab_problem.parameter_df.loc[ petab_parameter_id, PARAMETER_SCALE ] @@ -362,7 +371,9 @@ def rescale_sensitivity( scale[(LOG10, LOG)] = lambda s: scale[(LIN, LOG)](scale[(LOG10, LIN)](s)) if (old_scale, new_scale) not in scale: - raise NotImplementedError(f"Old scale: {old_scale}. New scale: {new_scale}.") + raise NotImplementedError( + f"Old scale: {old_scale}. New scale: {new_scale}." + ) return scale[(old_scale, new_scale)](sensitivity) @@ -497,14 +508,18 @@ def create_parameter_mapping( if parameter_mapping_kwargs is None: parameter_mapping_kwargs = {} - prelim_parameter_mapping = petab.get_optimization_to_simulation_parameter_mapping( - condition_df=petab_problem.condition_df, - measurement_df=petab_problem.measurement_df, - parameter_df=petab_problem.parameter_df, - observable_df=petab_problem.observable_df, - mapping_df=petab_problem.mapping_df, - model=petab_problem.model, - **dict(default_parameter_mapping_kwargs, **parameter_mapping_kwargs), + prelim_parameter_mapping = ( + petab.get_optimization_to_simulation_parameter_mapping( + condition_df=petab_problem.condition_df, + measurement_df=petab_problem.measurement_df, + parameter_df=petab_problem.parameter_df, + observable_df=petab_problem.observable_df, + mapping_df=petab_problem.mapping_df, + model=petab_problem.model, + **dict( + default_parameter_mapping_kwargs, **parameter_mapping_kwargs + ), + ) ) parameter_mapping = ParameterMapping() @@ -529,7 +544,8 @@ def _get_initial_state_sbml( ) if initial_assignment: initial_assignment = sp.sympify( - libsbml.formulaToL3String(initial_assignment.getMath()), locals=_clash + libsbml.formulaToL3String(initial_assignment.getMath()), + locals=_clash, ) if type_code == libsbml.SBML_SPECIES: value = ( @@ -538,12 +554,21 @@ def _get_initial_state_sbml( else initial_assignment ) elif type_code == libsbml.SBML_PARAMETER: - value = element.getValue() if initial_assignment is None else initial_assignment + value = ( + element.getValue() + if initial_assignment is None + else initial_assignment + ) elif type_code == libsbml.SBML_COMPARTMENT: - value = element.getSize() if initial_assignment is None else initial_assignment + value = ( + element.getSize() + if initial_assignment is None + else initial_assignment + ) else: raise NotImplementedError( - f"Don't know what how to handle {element_id} in " "condition table." + f"Don't know what how to handle {element_id} in " + "condition table." ) return value @@ -559,7 +584,9 @@ def _get_initial_state_pysb( ( initial.value for initial in petab_problem.model.model.initials - if match_complex_pattern(initial.pattern, species_pattern, exact=True) + if match_complex_pattern( + initial.pattern, species_pattern, exact=True + ) ), 0.0, ) @@ -616,9 +643,9 @@ def _set_initial_state( scale_map[init_par_id] = petab.LIN else: # parametric initial state - scale_map[init_par_id] = petab_problem.parameter_df[PARAMETER_SCALE].get( - value, petab.LIN - ) + scale_map[init_par_id] = petab_problem.parameter_df[ + PARAMETER_SCALE + ].get(value, petab.LIN) def create_parameter_mapping_for_condition( @@ -656,13 +683,17 @@ def create_parameter_mapping_for_condition( condition_map_sim ) != len(condition_scale_map_sim): raise AssertionError( - "Number of parameters and number of parameter " "scales do not match." 
+ "Number of parameters and number of parameter " + "scales do not match." ) - if len(condition_map_preeq) and len(condition_map_preeq) != len(condition_map_sim): + if len(condition_map_preeq) and len(condition_map_preeq) != len( + condition_map_sim + ): logger.debug(f"Preequilibration parameter map: {condition_map_preeq}") logger.debug(f"Simulation parameter map: {condition_map_sim}") raise AssertionError( - "Number of parameters for preequilbration " "and simulation do not match." + "Number of parameters for preequilbration " + "and simulation do not match." ) ########################################################################## @@ -690,7 +721,10 @@ def create_parameter_mapping_for_condition( condition_map_sim[PREEQ_INDICATOR_ID] = 0.0 condition_scale_map_sim[PREEQ_INDICATOR_ID] = LIN - for element_id, (value, preeq_value) in states_in_condition_table.items(): + for element_id, ( + value, + preeq_value, + ) in states_in_condition_table.items(): # for preequilibration init_par_id = f"initial_{element_id}_preeq" if ( @@ -738,7 +772,10 @@ def create_parameter_mapping_for_condition( condition_map_preeq, variable_par_ids, fixed_par_ids ) - condition_scale_map_preeq_var, condition_scale_map_preeq_fix = _subset_dict( + ( + condition_scale_map_preeq_var, + condition_scale_map_preeq_fix, + ) = _subset_dict( condition_scale_map_preeq, variable_par_ids, fixed_par_ids ) @@ -750,9 +787,13 @@ def create_parameter_mapping_for_condition( condition_scale_map_sim, variable_par_ids, fixed_par_ids ) - logger.debug("Fixed parameters preequilibration: " f"{condition_map_preeq_fix}") + logger.debug( + "Fixed parameters preequilibration: " f"{condition_map_preeq_fix}" + ) logger.debug("Fixed parameters simulation: " f"{condition_map_sim_fix}") - logger.debug("Variable parameters preequilibration: " f"{condition_map_preeq_var}") + logger.debug( + "Variable parameters preequilibration: " f"{condition_map_preeq_var}" + ) logger.debug("Variable parameters simulation: " f"{condition_map_sim_var}") petab.merge_preeq_and_sim_pars_condition( @@ -874,7 +915,10 @@ def create_edata_for_condition( states_in_condition_table = get_states_in_condition_table( petab_problem, condition=condition ) - if condition.get(PREEQUILIBRATION_CONDITION_ID) and states_in_condition_table: + if ( + condition.get(PREEQUILIBRATION_CONDITION_ID) + and states_in_condition_table + ): state_ids = amici_model.getStateIds() state_idx_reinitalization = [ state_ids.index(s) @@ -893,7 +937,9 @@ def create_edata_for_condition( # timepoints # find replicate numbers of time points - timepoints_w_reps = _get_timepoints_with_replicates(df_for_condition=measurement_df) + timepoints_w_reps = _get_timepoints_with_replicates( + df_for_condition=measurement_df + ) edata.setTimepoints(timepoints_w_reps) ########################################################################## @@ -946,7 +992,9 @@ def _get_timepoints_with_replicates( timepoints_w_reps = [] for time in timepoints: # subselect for time - df_for_time = df_for_condition[df_for_condition.time.astype(float) == time] + df_for_time = df_for_condition[ + df_for_condition.time.astype(float) == time + ] # rep number is maximum over rep numbers for observables n_reps = max(df_for_time.groupby([OBSERVABLE_ID, TIME]).size()) # append time point n_rep times @@ -979,7 +1027,9 @@ def _get_measurements_and_sigmas( arrays for measurement and sigmas """ # prepare measurement matrix - y = np.full(shape=(len(timepoints_w_reps), len(observable_ids)), fill_value=np.nan) + y = np.full( + 
shape=(len(timepoints_w_reps), len(observable_ids)), fill_value=np.nan + ) # prepare sigma matrix sigma_y = y.copy() @@ -1008,15 +1058,19 @@ def _get_measurements_and_sigmas( y[time_ix_for_obs_ix[observable_ix], observable_ix] = measurement[ MEASUREMENT ] - if isinstance(measurement.get(NOISE_PARAMETERS, None), numbers.Number): - sigma_y[time_ix_for_obs_ix[observable_ix], observable_ix] = measurement[ - NOISE_PARAMETERS - ] + if isinstance( + measurement.get(NOISE_PARAMETERS, None), numbers.Number + ): + sigma_y[ + time_ix_for_obs_ix[observable_ix], observable_ix + ] = measurement[NOISE_PARAMETERS] return y, sigma_y def rdatas_to_measurement_df( - rdatas: Sequence[amici.ReturnData], model: AmiciModel, measurement_df: pd.DataFrame + rdatas: Sequence[amici.ReturnData], + model: AmiciModel, + measurement_df: pd.DataFrame, ) -> pd.DataFrame: """ Create a measurement dataframe in the PEtab format from the passed @@ -1047,7 +1101,9 @@ def rdatas_to_measurement_df( t = list(rdata.ts) # extract rows for condition - cur_measurement_df = petab.get_rows_for_condition(measurement_df, condition) + cur_measurement_df = petab.get_rows_for_condition( + measurement_df, condition + ) # iterate over entries for the given condition # note: this way we only generate a dataframe entry for every @@ -1072,7 +1128,9 @@ def rdatas_to_measurement_df( def rdatas_to_simulation_df( - rdatas: Sequence[amici.ReturnData], model: AmiciModel, measurement_df: pd.DataFrame + rdatas: Sequence[amici.ReturnData], + model: AmiciModel, + measurement_df: pd.DataFrame, ) -> pd.DataFrame: """Create a PEtab simulation dataframe from :class:`amici.amici.ReturnData` s. diff --git a/python/sdist/amici/petab_simulate.py b/python/sdist/amici/petab_simulate.py index d243a28b8b..32c1ef8955 100644 --- a/python/sdist/amici/petab_simulate.py +++ b/python/sdist/amici/petab_simulate.py @@ -18,7 +18,11 @@ import petab from amici import AmiciModel, SensitivityMethod_none from amici.petab_import import import_petab_problem -from amici.petab_objective import RDATAS, rdatas_to_measurement_df, simulate_petab +from amici.petab_objective import ( + RDATAS, + rdatas_to_measurement_df, + simulate_petab, +) AMICI_MODEL = "amici_model" AMICI_SOLVER = "solver" @@ -49,7 +53,10 @@ def simulate_without_noise(self, **kwargs) -> pd.DataFrame: in the Simulator constructor (including the PEtab problem). 
""" if AMICI_MODEL in {*kwargs, *dir(self)} and ( - any(k in kwargs for k in inspect.signature(import_petab_problem).parameters) + any( + k in kwargs + for k in inspect.signature(import_petab_problem).parameters + ) ): print( "Arguments related to the PEtab import are unused if " diff --git a/python/sdist/amici/petab_util.py b/python/sdist/amici/petab_util.py index a9666d84ac..9108b108bc 100644 --- a/python/sdist/amici/petab_util.py +++ b/python/sdist/amici/petab_util.py @@ -27,7 +27,9 @@ def get_states_in_condition_table( raise NotImplementedError() species_check_funs = { - MODEL_TYPE_SBML: lambda x: _element_is_sbml_state(petab_problem.sbml_model, x), + MODEL_TYPE_SBML: lambda x: _element_is_sbml_state( + petab_problem.sbml_model, x + ), MODEL_TYPE_PYSB: lambda x: _element_is_pysb_pattern( petab_problem.model.model, x ), @@ -36,7 +38,9 @@ def get_states_in_condition_table( resolve_mapping(petab_problem.mapping_df, col): (None, None) if condition is None else ( - petab_problem.condition_df.loc[condition[SIMULATION_CONDITION_ID], col], + petab_problem.condition_df.loc[ + condition[SIMULATION_CONDITION_ID], col + ], petab_problem.condition_df.loc[ condition[PREEQUILIBRATION_CONDITION_ID], col ] @@ -60,7 +64,9 @@ def get_states_in_condition_table( pysb.bng.generate_equations(petab_problem.model.model) try: - spm = pysb.pattern.SpeciesPatternMatcher(model=petab_problem.model.model) + spm = pysb.pattern.SpeciesPatternMatcher( + model=petab_problem.model.model + ) except NotImplementedError as e: raise NotImplementedError( "Requires https://github.com/pysb/pysb/pull/570. " diff --git a/python/sdist/amici/pysb_import.py b/python/sdist/amici/pysb_import.py index 0dbba69ddb..aa1dc7cd9b 100644 --- a/python/sdist/amici/pysb_import.py +++ b/python/sdist/amici/pysb_import.py @@ -10,7 +10,17 @@ import os import sys from pathlib import Path -from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) import numpy as np import pysb @@ -255,8 +265,12 @@ def ode_model_from_pysb_importer( _process_pysb_parameters(model, ode, constant_parameters) if compute_conservation_laws: _process_pysb_conservation_laws(model, ode) - _process_pysb_observables(model, ode, observables, sigmas, noise_distributions) - _process_pysb_expressions(model, ode, observables, sigmas, noise_distributions) + _process_pysb_observables( + model, ode, observables, sigmas, noise_distributions + ) + _process_pysb_expressions( + model, ode, observables, sigmas, noise_distributions + ) ode._has_quadratic_nllh = not noise_distributions or all( noise_distr in ["normal", "lin-normal", "log-normal", "log10-normal"] for noise_distr in noise_distributions.values() @@ -382,7 +396,9 @@ def _process_pysb_species(pysb_model: pysb.Model, ode_model: DEModel) -> None: for ix, specie in enumerate(pysb_model.species): init = sp.sympify("0.0") for ic in pysb_model.odes.model.initials: - if pysb.pattern.match_complex_pattern(ic.pattern, specie, exact=True): + if pysb.pattern.match_complex_pattern( + ic.pattern, specie, exact=True + ): # we don't want to allow expressions in initial conditions if ic.value in pysb_model.expressions: init = pysb_model.expressions[ic.value.name].expand_expr() @@ -390,7 +406,9 @@ def _process_pysb_species(pysb_model: pysb.Model, ode_model: DEModel) -> None: init = ic.value ode_model.add_component( - DifferentialState(sp.Symbol(f"__s{ix}"), f"{specie}", init, xdot[ix]) + DifferentialState( + 
sp.Symbol(f"__s{ix}"), f"{specie}", init, xdot[ix] + ) ) logger.debug(f"Finished Processing PySB species ") @@ -464,7 +482,8 @@ def _process_pysb_expressions( include_derived=True ) | pysb_model.expressions_dynamic(include_derived=True): if any( - isinstance(symbol, pysb.Tag) for symbol in expr.expand_expr().free_symbols + isinstance(symbol, pysb.Tag) + for symbol in expr.expand_expr().free_symbols ): # we only need explicit instantiations of expressions with tags, # which are defined in the derived expressions. The abstract @@ -521,11 +540,15 @@ def _add_expression( :param ode_model: see :py:func:`_process_pysb_expressions` """ - ode_model.add_component(Expression(sym, name, _parse_special_functions(expr))) + ode_model.add_component( + Expression(sym, name, _parse_special_functions(expr)) + ) if name in observables: noise_dist = ( - noise_distributions.get(name, "normal") if noise_distributions else "normal" + noise_distributions.get(name, "normal") + if noise_distributions + else "normal" ) y = sp.Symbol(f"{name}") @@ -533,7 +556,9 @@ def _add_expression( obs = Observable(y, name, sym, transformation=trafo) ode_model.add_component(obs) - sigma_name, sigma_value = _get_sigma_name_and_value(pysb_model, name, sigmas) + sigma_name, sigma_value = _get_sigma_name_and_value( + pysb_model, name, sigmas + ) sigma = sp.Symbol(sigma_name) ode_model.add_component(SigmaY(sigma, f"{sigma_name}", sigma_value)) @@ -542,10 +567,14 @@ def _add_expression( my = generate_measurement_symbol(obs.get_id()) cost_fun_expr = sp.sympify( cost_fun_str, - locals=dict(zip(_get_str_symbol_identifiers(name), (y, my, sigma))), + locals=dict( + zip(_get_str_symbol_identifiers(name), (y, my, sigma)) + ), ) ode_model.add_component( - LogLikelihoodY(sp.Symbol(f"llh_{name}"), f"llh_{name}", cost_fun_expr) + LogLikelihoodY( + sp.Symbol(f"llh_{name}"), f"llh_{name}", cost_fun_expr + ) ) @@ -575,7 +604,9 @@ def _get_sigma_name_and_value( sigma_name = sigmas[obs_name] try: # find corresponding Expression instance - sigma_expr = next(x for x in pysb_model.expressions if x.name == sigma_name) + sigma_expr = next( + x for x in pysb_model.expressions if x.name == sigma_name + ) except StopIteration: raise ValueError( f"value of sigma {obs_name} is not a " f"valid expression." 
@@ -633,7 +664,9 @@ def _process_pysb_observables( @log_execution_time("computing PySB conservation laws", logger) -def _process_pysb_conservation_laws(pysb_model: pysb.Model, ode_model: DEModel) -> None: +def _process_pysb_conservation_laws( + pysb_model: pysb.Model, ode_model: DEModel +) -> None: """ Removes species according to conservation laws to ensure that the jacobian has full rank @@ -647,7 +680,9 @@ def _process_pysb_conservation_laws(pysb_model: pysb.Model, ode_model: DEModel) monomers_without_conservation_law = set() for rule in pysb_model.rules: - monomers_without_conservation_law |= _get_unconserved_monomers(rule, pysb_model) + monomers_without_conservation_law |= _get_unconserved_monomers( + rule, pysb_model + ) monomers_without_conservation_law |= ( _compute_monomers_with_fixed_initial_conditions(pysb_model) @@ -667,7 +702,9 @@ def _process_pysb_conservation_laws(pysb_model: pysb.Model, ode_model: DEModel) ode_model.add_conservation_law(**cl) -def _compute_monomers_with_fixed_initial_conditions(pysb_model: pysb.Model) -> Set[str]: +def _compute_monomers_with_fixed_initial_conditions( + pysb_model: pysb.Model, +) -> Set[str]: """ Computes the set of monomers in a model with species that have fixed initial conditions @@ -696,7 +733,9 @@ def _compute_monomers_with_fixed_initial_conditions(pysb_model: pysb.Model) -> S def _generate_cl_prototypes( - excluded_monomers: Iterable[str], pysb_model: pysb.Model, ode_model: DEModel + excluded_monomers: Iterable[str], + pysb_model: pysb.Model, + ode_model: DEModel, ) -> CL_Prototype: """ Constructs a dict that contains preprocessed information for the @@ -717,7 +756,9 @@ def _generate_cl_prototypes( """ cl_prototypes = dict() - _compute_possible_indices(cl_prototypes, pysb_model, ode_model, excluded_monomers) + _compute_possible_indices( + cl_prototypes, pysb_model, ode_model, excluded_monomers + ) _compute_dependency_idx(cl_prototypes) _compute_target_index(cl_prototypes, ode_model) @@ -825,7 +866,9 @@ def _compute_dependency_idx(cl_prototypes: CL_Prototype) -> None: prototype_j["dependency_idx"][idx] |= {monomer_i} -def _compute_target_index(cl_prototypes: CL_Prototype, ode_model: DEModel) -> None: +def _compute_target_index( + cl_prototypes: CL_Prototype, ode_model: DEModel +) -> None: """ Computes the target index for every monomer @@ -885,7 +928,9 @@ def _compute_target_index(cl_prototypes: CL_Prototype, ode_model: DEModel) -> No # multimers has a low upper bound and the species count does not # vary too much across conservation laws, this approximation # should be fine - prototype["fillin"] = prototype["appearance_count"] * prototype["species_count"] + prototype["fillin"] = ( + prototype["appearance_count"] * prototype["species_count"] + ) # we might end up with the same index for multiple monomers, so loop until # we have a set of unique target indices @@ -936,7 +981,9 @@ def _cl_has_cycle(monomer: str, cl_prototypes: CL_Prototype) -> bool: root = monomer return any( _is_in_cycle(connecting_monomer, cl_prototypes, visited, root) - for connecting_monomer in prototype["dependency_idx"][prototype["target_index"]] + for connecting_monomer in prototype["dependency_idx"][ + prototype["target_index"] + ] ) @@ -980,7 +1027,9 @@ def _is_in_cycle( return any( _is_in_cycle(connecting_monomer, cl_prototypes, visited, root) - for connecting_monomer in prototype["dependency_idx"][prototype["target_index"]] + for connecting_monomer in prototype["dependency_idx"][ + prototype["target_index"] + ] ) @@ -997,9 +1046,9 @@ def 
_greedy_target_index_update(cl_prototypes: CL_Prototype) -> None: target_indices = _get_target_indices(cl_prototypes) for monomer, prototype in cl_prototypes.items(): - if target_indices.count(prototype["target_index"]) > 1 or _cl_has_cycle( - monomer, cl_prototypes - ): + if target_indices.count( + prototype["target_index"] + ) > 1 or _cl_has_cycle(monomer, cl_prototypes): # compute how much fillin the next best target_index would yield # we exclude already existing target indices to avoid that @@ -1008,7 +1057,9 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None: # solution but prevents infinite loops for target_index in list(set(target_indices)): try: - local_idx = prototype["possible_indices"].index(target_index) + local_idx = prototype["possible_indices"].index( + target_index + ) except ValueError: local_idx = None @@ -1023,13 +1074,16 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None: idx = np.argmin(prototype["appearance_counts"]) prototype["local_index"] = idx - prototype["alternate_target_index"] = prototype["possible_indices"][idx] - prototype["alternate_appearance_count"] = prototype["appearance_counts"][ - idx - ] + prototype["alternate_target_index"] = prototype[ + "possible_indices" + ][idx] + prototype["alternate_appearance_count"] = prototype[ + "appearance_counts" + ][idx] prototype["alternate_fillin"] = ( - prototype["alternate_appearance_count"] * prototype["species_count"] + prototype["alternate_appearance_count"] + * prototype["species_count"] ) prototype["diff_fillin"] = ( @@ -1038,13 +1092,18 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None: else: prototype["diff_fillin"] = -1 - if all(prototype["diff_fillin"] == -1 for prototype in cl_prototypes.values()): + if all( + prototype["diff_fillin"] == -1 for prototype in cl_prototypes.values() + ): raise RuntimeError( - "Could not compute a valid set of conservation " "laws for this model!" + "Could not compute a valid set of conservation " + "laws for this model!" 
) # this puts prototypes with high diff_fillin last - cl_prototypes = sorted(cl_prototypes.items(), key=lambda kv: kv[1]["diff_fillin"]) + cl_prototypes = sorted( + cl_prototypes.items(), key=lambda kv: kv[1]["diff_fillin"] + ) cl_prototypes = {proto[0]: proto[1] for proto in cl_prototypes} for monomer in cl_prototypes: @@ -1058,12 +1117,15 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None: # are recomputed on the fly) if prototype["diff_fillin"] > -1 and ( - _get_target_indices(cl_prototypes).count(prototype["target_index"]) > 1 + _get_target_indices(cl_prototypes).count(prototype["target_index"]) + > 1 or _cl_has_cycle(monomer, cl_prototypes) ): prototype["fillin"] = prototype["alternate_fillin"] prototype["target_index"] = prototype["alternate_target_index"] - prototype["appearance_count"] = prototype["alternate_appearance_count"] + prototype["appearance_count"] = prototype[ + "alternate_appearance_count" + ] del prototype["possible_indices"][prototype["local_index"]] del prototype["appearance_counts"][prototype["local_index"]] @@ -1146,7 +1208,9 @@ def _add_conservation_for_constant_species( ) -def _flatten_conservation_laws(conservation_laws: List[ConservationLaw]) -> None: +def _flatten_conservation_laws( + conservation_laws: List[ConservationLaw], +) -> None: """ Flatten the conservation laws such that the state_expr not longer depend on any states that are replaced by conservation laws @@ -1160,9 +1224,12 @@ def _flatten_conservation_laws(conservation_laws: List[ConservationLaw]) -> None for cl in conservation_laws: # only update if we changed something if any( - _apply_conseration_law_sub(cl, sub) for sub in conservation_law_subs + _apply_conseration_law_sub(cl, sub) + for sub in conservation_law_subs ): - conservation_law_subs = _get_conservation_law_subs(conservation_laws) + conservation_law_subs = _get_conservation_law_subs( + conservation_laws + ) def _apply_conseration_law_sub( @@ -1245,7 +1312,9 @@ def _get_conservation_law_subs( def has_fixed_parameter_ic( - specie: pysb.core.ComplexPattern, pysb_model: pysb.Model, ode_model: DEModel + specie: pysb.core.ComplexPattern, + pysb_model: pysb.Model, + ode_model: DEModel, ) -> bool: """ Wrapper to interface @@ -1271,7 +1340,9 @@ def has_fixed_parameter_ic( ( ic for ic, condition in enumerate(pysb_model.initials) - if pysb.pattern.match_complex_pattern(condition[0], specie, exact=True) + if pysb.pattern.match_complex_pattern( + condition[0], specie, exact=True + ) ), None, ) @@ -1304,7 +1375,9 @@ def extract_monomers( ] -def _get_unconserved_monomers(rule: pysb.Rule, pysb_model: pysb.Model) -> Set[str]: +def _get_unconserved_monomers( + rule: pysb.Rule, pysb_model: pysb.Model +) -> Set[str]: """ Constructs the set of monomer names for which the specified rule changes the stoichiometry of the monomer in the specified model. 
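Note: the conservation-law machinery reflowed above (target indices, fillin counts, flattening of substitutions) ultimately serves one operation: for each conserved moiety, one state is eliminated by expressing it as the conserved total minus the remaining states, keeping the reduced Jacobian full-rank. A toy sketch with hypothetical names, not the actual AMICI data structures:

```python
import sympy as sp

x0, x1, total_cl = sp.symbols("x0 x1 total_cl")
state_expr = total_cl - x0   # x1 eliminated via the law x0 + x1 = total_cl
dx0_dt = -x0 + x1            # toy rate equation, purely illustrative
reduced_rhs = dx0_dt.subs(x1, state_expr)
print(reduced_rhs)           # -> -2*x0 + total_cl
```

Flattening (as in `_flatten_conservation_laws`) then repeats such substitutions until no eliminated state appears in any other law's `state_expr`.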
@@ -1320,11 +1393,16 @@ def _get_unconserved_monomers(rule: pysb.Rule, pysb_model: pysb.Model) -> Set[st """ unconserved_monomers = set() - if not rule.delete_molecules and len(rule.product_pattern.complex_patterns) == 0: + if ( + not rule.delete_molecules + and len(rule.product_pattern.complex_patterns) == 0 + ): # if delete_molecules is not True but we have a degradation rule, # we have to actually go through the reactions that are created by # the rule - for reaction in [r for r in pysb_model.reactions if rule.name in r["rule"]]: + for reaction in [ + r for r in pysb_model.reactions if rule.name in r["rule"] + ]: unconserved_monomers |= _get_changed_stoichiometries( [pysb_model.species[ix] for ix in reaction["reactants"]], [pysb_model.species[ix] for ix in reaction["products"]], @@ -1377,7 +1455,9 @@ def pysb_model_from_path(pysb_model_file: Union[str, Path]) -> pysb.Model: :return: The pysb Model instance """ - pysb_model_module_name = os.path.splitext(os.path.split(pysb_model_file)[-1])[0] + pysb_model_module_name = os.path.splitext( + os.path.split(pysb_model_file)[-1] + )[0] import importlib.util diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index 98313c3d77..dd24b98cf8 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -211,7 +211,9 @@ def _process_document(self) -> None: """ # Ensure we got a valid SBML model, otherwise further processing # might lead to undefined results - log_execution_time("validating SBML", logger)(self.sbml_doc.validateSBML)() + log_execution_time("validating SBML", logger)( + self.sbml_doc.validateSBML + )() _check_lib_sbml_errors(self.sbml_doc, self.show_sbml_warnings) # Flatten "comp" model? Do that before any other converters are run @@ -251,7 +253,9 @@ def _process_document(self) -> None: self.sbml_doc.convert )(convert_config) - convert_config = sbml.SBMLLocalParameterConverter().getDefaultProperties() + convert_config = ( + sbml.SBMLLocalParameterConverter().getDefaultProperties() + ) log_execution_time("converting SBML local parameters", logger)( self.sbml_doc.convert )(convert_config) @@ -471,7 +475,9 @@ def _build_ode_model( See :py:func:`sbml2amici` for parameters. 
""" - constant_parameters = list(constant_parameters) if constant_parameters else [] + constant_parameters = ( + list(constant_parameters) if constant_parameters else [] + ) hardcode_symbols = set(hardcode_symbols) if hardcode_symbols else {} if invalid := (set(constant_parameters) & set(hardcode_symbols)): @@ -494,10 +500,13 @@ def _build_ode_model( self._reset_symbols() self.sbml_parser_settings.setParseLog( - sbml.L3P_PARSE_LOG_AS_LOG10 if log_as_log10 else sbml.L3P_PARSE_LOG_AS_LN + sbml.L3P_PARSE_LOG_AS_LOG10 + if log_as_log10 + else sbml.L3P_PARSE_LOG_AS_LN ) self._process_sbml( - constant_parameters=constant_parameters, hardcode_symbols=hardcode_symbols + constant_parameters=constant_parameters, + hardcode_symbols=hardcode_symbols, ) if ( @@ -530,7 +539,9 @@ def _build_ode_model( simplify=simplify, cache_simplify=cache_simplify, ) - ode_model.import_from_sbml_importer(self, compute_cls=compute_conservation_laws) + ode_model.import_from_sbml_importer( + self, compute_cls=compute_conservation_laws + ) return ode_model @log_execution_time("importing SBML", logger) @@ -552,7 +563,8 @@ def _process_sbml( self.check_support() self._gather_locals(hardcode_symbols=hardcode_symbols) self._process_parameters( - constant_parameters=constant_parameters, hardcode_symbols=hardcode_symbols + constant_parameters=constant_parameters, + hardcode_symbols=hardcode_symbols, ) self._process_compartments() self._process_species() @@ -581,7 +593,10 @@ def check_support(self) -> None: # the "required" attribute is only available in SBML Level 3 for i_plugin in range(self.sbml.getNumPlugins()): plugin = self.sbml.getPlugin(i_plugin) - if self.sbml_doc.getPkgRequired(plugin.getPackageName()) is False: + if ( + self.sbml_doc.getPkgRequired(plugin.getPackageName()) + is False + ): # if not "required", this has no impact on model # simulation, and we can safely ignore it @@ -597,7 +612,9 @@ def check_support(self) -> None: raise SBMLException( "The following fbc extension elements are " "currently not supported: " - + ", ".join(list(map(str, plugin.getListOfAllElements()))) + + ", ".join( + list(map(str, plugin.getListOfAllElements())) + ) ) continue @@ -669,7 +686,8 @@ def check_event_support(self) -> None: trigger_sbml = event.getTrigger() if trigger_sbml is None: logger.warning( - f"Event {event_id} trigger has no trigger, " "so will be skipped." + f"Event {event_id} trigger has no trigger, " + "so will be skipped." ) continue if trigger_sbml.getMath() is None: @@ -696,7 +714,9 @@ def _gather_locals(self, hardcode_symbols: Sequence[str] = None) -> None: self._gather_base_locals(hardcode_symbols=hardcode_symbols) self._gather_dependent_locals() - def _gather_base_locals(self, hardcode_symbols: Sequence[str] = None) -> None: + def _gather_base_locals( + self, hardcode_symbols: Sequence[str] = None + ) -> None: """ Populate self.local_symbols with pure symbol definitions that do not depend on any other symbol. 
@@ -741,7 +761,10 @@ def _gather_base_locals(self, hardcode_symbols: Sequence[str] = None) -> None: for x_ref in _get_list_of_species_references(self.sbml): if not x_ref.isSetId(): continue - if x_ref.isSetStoichiometry() and not self.is_assignment_rule_target(x_ref): + if ( + x_ref.isSetStoichiometry() + and not self.is_assignment_rule_target(x_ref) + ): value = sp.Float(x_ref.getStoichiometry()) else: value = _get_identifier_symbol(x_ref) @@ -761,7 +784,8 @@ def _gather_dependent_locals(self): if not r.isSetId(): continue self.add_local_symbol( - r.getId(), self._sympy_from_sbml_math(r.getKineticLaw() or sp.Float(0)) + r.getId(), + self._sympy_from_sbml_math(r.getKineticLaw() or sp.Float(0)), ) def add_local_symbol(self, key: str, value: sp.Expr): @@ -819,7 +843,9 @@ def _process_species(self) -> None: Get species information from SBML model. """ if self.sbml.isSetConversionFactor(): - conversion_factor = symbol_with_assumptions(self.sbml.getConversionFactor()) + conversion_factor = symbol_with_assumptions( + self.sbml.getConversionFactor() + ) else: conversion_factor = 1 @@ -831,7 +857,9 @@ def _process_species(self) -> None: "compartment": _get_species_compartment_symbol(s), "constant": s.getConstant() or s.getBoundaryCondition(), "amount": s.getHasOnlySubstanceUnits(), - "conversion_factor": symbol_with_assumptions(s.getConversionFactor()) + "conversion_factor": symbol_with_assumptions( + s.getConversionFactor() + ) if s.isSetConversionFactor() else conversion_factor, "index": len(self.symbols[SymbolId.SPECIES]), @@ -856,7 +884,9 @@ def _process_species_initial(self): # targets to have InitialAssignments. species = self.symbols[SymbolId.SPECIES].get(species_id, None) - ia_initial = self._get_element_initial_assignment(species_variable.getId()) + ia_initial = self._get_element_initial_assignment( + species_variable.getId() + ) if ia_initial is not None: initial = ia_initial if species: @@ -869,12 +899,15 @@ def _process_species_initial(self): all_rateof_dummies.append(rateof_dummies) # don't assign this since they need to stay in order - sorted_species = toposort_symbols(self.symbols[SymbolId.SPECIES], "init") + sorted_species = toposort_symbols( + self.symbols[SymbolId.SPECIES], "init" + ) for species, rateof_dummies in zip( self.symbols[SymbolId.SPECIES].values(), all_rateof_dummies ): species["init"] = _dummy_to_rateof( - smart_subs_dict(species["init"], sorted_species, "init"), rateof_dummies + smart_subs_dict(species["init"], sorted_species, "init"), + rateof_dummies, ) @log_execution_time("processing SBML rate rules", logger) @@ -961,7 +994,9 @@ def add_d_dt( variable0 = smart_subs(variable0, species_id, species["init"]) for species in self.symbols[SymbolId.SPECIES].values(): - species["init"] = smart_subs(species["init"], variable, variable0) + species["init"] = smart_subs( + species["init"], variable, variable0 + ) # add compartment/parameter species self.symbols[SymbolId.SPECIES][variable] = { @@ -1028,7 +1063,8 @@ def _process_parameters( ] for parameter in fixed_parameters: if ( - self._get_element_initial_assignment(parameter.getId()) is not None + self._get_element_initial_assignment(parameter.getId()) + is not None or self.is_assignment_rule_target(parameter) or self.is_rate_rule_target(parameter) ): @@ -1069,8 +1105,12 @@ def _process_parameters( for par in self.sbml.getListOfParameters(): if ( ia := self._get_element_initial_assignment(par.getId()) - ) is not None and ia.find(sp.core.function.UndefinedFunction("rateOf")): - 
self.symbols[SymbolId.EXPRESSION][_get_identifier_symbol(par)] = { + ) is not None and ia.find( + sp.core.function.UndefinedFunction("rateOf") + ): + self.symbols[SymbolId.EXPRESSION][ + _get_identifier_symbol(par) + ] = { "name": par.getName() if par.isSetName() else par.getId(), "value": ia, } @@ -1123,9 +1163,9 @@ def _process_reactions(self): # rate of change in species concentration) now occurs # in the `dx_dt` method in "de_export.py", which also # accounts for possibly variable compartments. - self.stoichiometric_matrix[species["index"], reaction_index] += ( - sign * stoichiometry * species["conversion_factor"] - ) + self.stoichiometric_matrix[ + species["index"], reaction_index + ] += (sign * stoichiometry * species["conversion_factor"]) if reaction.isSetId(): sym_math = self._local_symbols[reaction.getId()] else: @@ -1201,9 +1241,9 @@ def _process_rule_algebraic(self, rule: sbml.AlgebraicRule): continue # and there must also not be a rate rule or assignment # rule for it - if self.is_assignment_rule_target(sbml_var) or self.is_rate_rule_target( + if self.is_assignment_rule_target( sbml_var - ): + ) or self.is_rate_rule_target(sbml_var): continue # Furthermore, if the entity is a Species object, its value # must not be determined by reactions, which means that it @@ -1217,10 +1257,15 @@ def _process_rule_algebraic(self, rule: sbml.AlgebraicRule): ) is_involved_in_reaction = is_species and not smart_is_zero_matrix( self.stoichiometric_matrix[ - list(self.symbols[SymbolId.SPECIES].keys()).index(symbol), : + list(self.symbols[SymbolId.SPECIES].keys()).index(symbol), + :, ] ) - if is_species and not is_boundary_condition and is_involved_in_reaction: + if ( + is_species + and not is_boundary_condition + and is_involved_in_reaction + ): continue free_variables.add(symbol) @@ -1270,14 +1315,22 @@ def _process_rule_algebraic(self, rule: sbml.AlgebraicRule): symbol["init"] = sp.Float(symbol.pop("value")) # if not a species, add a zeros row to the stoichiometric # matrix - if (isinstance(symbol["init"], float) and np.isnan(symbol["init"])) or ( - isinstance(symbol["init"], sp.Number) and symbol["init"] == sp.nan + if ( + isinstance(symbol["init"], float) + and np.isnan(symbol["init"]) + ) or ( + isinstance(symbol["init"], sp.Number) + and symbol["init"] == sp.nan ): # placeholder, needs to be determined in IC calculation symbol["init"] = sp.Float(0.0) - self.stoichiometric_matrix = self.stoichiometric_matrix.row_insert( - self.stoichiometric_matrix.shape[0], - sp.SparseMatrix([[0] * self.stoichiometric_matrix.shape[1]]), + self.stoichiometric_matrix = ( + self.stoichiometric_matrix.row_insert( + self.stoichiometric_matrix.shape[0], + sp.SparseMatrix( + [[0] * self.stoichiometric_matrix.shape[1]] + ), + ) ) elif var_ix != self.stoichiometric_matrix.shape[0] - 1: # if not the last col, move it to the end @@ -1353,7 +1406,9 @@ def _convert_event_assignment_parameter_targets_to_species(self): This is for the convenience of only implementing event assignments for "species". """ - parameter_targets = _collect_event_assignment_parameter_targets(self.sbml) + parameter_targets = _collect_event_assignment_parameter_targets( + self.sbml + ) for parameter_target in parameter_targets: # Parameter rate rules already exist as species. if parameter_target in self.symbols[SymbolId.SPECIES]: @@ -1374,7 +1429,9 @@ def _convert_event_assignment_parameter_targets_to_species(self): "Unexpected error. The parameter target of an " "event assignment was processed twice." 
) - parameter_def = self.symbols[symbol_id].pop(parameter_target) + parameter_def = self.symbols[symbol_id].pop( + parameter_target + ) if parameter_def is None: # this happens for parameters that have initial assignments # or are assignment rule targets @@ -1382,7 +1439,9 @@ def _convert_event_assignment_parameter_targets_to_species(self): ia_init = self._get_element_initial_assignment(par.getId()) parameter_def = { "name": par.getName() if par.isSetName() else par.getId(), - "value": sp.Float(par.getValue()) if ia_init is None else ia_init, + "value": sp.Float(par.getValue()) + if ia_init is None + else ia_init, } # Fixed parameters are added as species such that they can be # targets of events. @@ -1423,9 +1482,9 @@ def get_empty_bolus_value() -> sp.Float: # Species has a compartment "compartment" in species_def ): - concentration_species_by_compartment[species_def["compartment"]].append( - species - ) + concentration_species_by_compartment[ + species_def["compartment"] + ].append(species) for ievent, event in enumerate(events): # get the event id (which is optional unfortunately) @@ -1448,7 +1507,9 @@ def get_empty_bolus_value() -> sp.Float: event_assignments = event.getListOfEventAssignments() compartment_event_assignments = set() for event_assignment in event_assignments: - variable_sym = symbol_with_assumptions(event_assignment.getVariable()) + variable_sym = symbol_with_assumptions( + event_assignment.getVariable() + ) if event_assignment.getMath() is None: # Ignore event assignments with no change in value. continue @@ -1477,7 +1538,10 @@ def get_empty_bolus_value() -> sp.Float: if variable_sym in concentration_species_by_compartment: compartment_event_assignments.add(variable_sym) - for comp, assignment in self.compartment_assignment_rules.items(): + for ( + comp, + assignment, + ) in self.compartment_assignment_rules.items(): if variable_sym not in assignment.free_symbols: continue compartment_event_assignments.add(comp) @@ -1510,10 +1574,14 @@ def get_empty_bolus_value() -> sp.Float: for index in range(len(bolus)): if bolus[index] != get_empty_bolus_value(): bolus[index] -= state_vector[index] - bolus[index] = bolus[index].subs(get_empty_bolus_value(), sp.Float(0.0)) + bolus[index] = bolus[index].subs( + get_empty_bolus_value(), sp.Float(0.0) + ) initial_value = ( - trigger_sbml.getInitialValue() if trigger_sbml is not None else True + trigger_sbml.getInitialValue() + if trigger_sbml is not None + else True ) if self.symbols[SymbolId.ALGEBRAIC_EQUATION] and not initial_value: # in principle this could be implemented, requires running @@ -1559,7 +1627,9 @@ def _process_observables( See :py:func:`sbml2amici`. 
""" - _validate_observables(observables, sigmas, noise_distributions, events=False) + _validate_observables( + observables, sigmas, noise_distributions, events=False + ) # add user-provided observables or make all species, and compartments # with assignment rules, observable @@ -1581,7 +1651,9 @@ def _process_observables( # check for nesting of observables (unsupported) observable_syms = set(self.symbols[SymbolId.OBSERVABLE].keys()) for obs in self.symbols[SymbolId.OBSERVABLE].values(): - if any(sym in observable_syms for sym in obs["value"].free_symbols): + if any( + sym in observable_syms for sym in obs["value"].free_symbols + ): raise ValueError( "Nested observables are not supported, " f"but observable `{obs['name']} = {obs['value']}` " @@ -1590,7 +1662,9 @@ def _process_observables( elif observables is None: self._generate_default_observables() - _check_symbol_nesting(self.symbols[SymbolId.OBSERVABLE], "eventObservable") + _check_symbol_nesting( + self.symbols[SymbolId.OBSERVABLE], "eventObservable" + ) self._process_log_likelihood(sigmas, noise_distributions) @@ -1618,14 +1692,20 @@ def _process_event_observables( return _validate_observables( - event_observables, event_sigmas, event_noise_distributions, events=True + event_observables, + event_sigmas, + event_noise_distributions, + events=True, ) # gather local symbols before parsing observable and sigma formulas for obs, definition in event_observables.items(): self.add_local_symbol(obs, symbol_with_assumptions(obs)) # check corresponding event exists - if sp.Symbol(definition["event"]) not in self.symbols[SymbolId.EVENT]: + if ( + sp.Symbol(definition["event"]) + not in self.symbols[SymbolId.EVENT] + ): raise ValueError( "Could not find an event with the event identifier " f'{definition["event"]} for the event observable with name' @@ -1663,7 +1743,10 @@ def _process_event_observables( event_sigmas, event_noise_distributions, events=True ) self._process_log_likelihood( - event_sigmas, event_noise_distributions, events=True, event_reg=True + event_sigmas, + event_noise_distributions, + events=True, + event_reg=True, ) def _generate_default_observables(self): @@ -1755,14 +1838,17 @@ def _process_log_likelihood( self.symbols[sigma_symbol] = { symbol_with_assumptions(f"sigma_{obs_id}"): { "name": f'sigma_{obs["name"]}', - "value": self._sympy_from_sbml_math(sigmas.get(str(obs_id), "1.0")), + "value": self._sympy_from_sbml_math( + sigmas.get(str(obs_id), "1.0") + ), } for obs_id, obs in self.symbols[obs_symbol].items() } self.symbols[llh_symbol] = {} for (obs_id, obs), (sigma_id, sigma) in zip( - self.symbols[obs_symbol].items(), self.symbols[sigma_symbol].items() + self.symbols[obs_symbol].items(), + self.symbols[sigma_symbol].items(), ): symbol = symbol_with_assumptions(f"J{obs_id}") dist = noise_distributions.get(str(obs_id), "normal") @@ -1805,7 +1891,9 @@ def _process_initial_assignments(self): continue sym_math = self._make_initial( - smart_subs_dict(sym_math, self.symbols[SymbolId.EXPRESSION], "value") + smart_subs_dict( + sym_math, self.symbols[SymbolId.EXPRESSION], "value" + ) ) self.initial_assignments[_get_identifier_symbol(ia)] = sym_math @@ -1868,7 +1956,9 @@ def _make_initial( if "init" in species: sym_math = smart_subs(sym_math, species_id, species["init"]) - sym_math = smart_subs(sym_math, self._local_symbols["time"], sp.Float(0)) + sym_math = smart_subs( + sym_math, self._local_symbols["time"], sp.Float(0) + ) sym_math = _dummy_to_rateof(sym_math, rateof_to_dummy) @@ -1904,7 +1994,9 @@ def 
process_conservation_laws(self, ode_model) -> None: # add algebraic variables to species_solver as they were ignored above ndifferential = len(ode_model._differential_states) nalgebraic = len(ode_model._algebraic_states) - species_solver.extend(list(range(ndifferential, ndifferential + nalgebraic))) + species_solver.extend( + list(range(ndifferential, ndifferential + nalgebraic)) + ) # Check, whether species_solver is empty now. As currently, AMICI # cannot handle ODEs without species, CLs must be switched off in this @@ -1914,7 +2006,9 @@ def process_conservation_laws(self, ode_model) -> None: species_solver = list(range(ode_model.num_states_rdata())) # prune out species from stoichiometry and - self.stoichiometric_matrix = self.stoichiometric_matrix[species_solver, :] + self.stoichiometric_matrix = self.stoichiometric_matrix[ + species_solver, : + ] # add the found CLs to the ode_model for cl in conservation_laws: @@ -1934,9 +2028,13 @@ def _get_conservation_laws_demartino( quantity (including the eliminated one) (2) coefficients for the species in (1) """ - from .conserved_quantities_demartino import compute_moiety_conservation_laws + from .conserved_quantities_demartino import ( + compute_moiety_conservation_laws, + ) - sm = self.stoichiometric_matrix[: len(self.symbols[SymbolId.SPECIES]), :] + sm = self.stoichiometric_matrix[ + : len(self.symbols[SymbolId.SPECIES]), : + ] try: stoichiometric_list = [float(entry) for entry in sm.T.flat()] @@ -1959,7 +2057,9 @@ def _get_conservation_laws_demartino( stoichiometric_list, *sm.shape, rng_seed=32, - species_names=[str(x.get_id()) for x in ode_model._differential_states], + species_names=[ + str(x.get_id()) for x in ode_model._differential_states + ], ) # Sparsify conserved quantities @@ -1971,7 +2071,9 @@ def _get_conservation_laws_demartino( # `A * x0 = total_cl` and bring it to reduced row echelon form. The # pivot species are the ones to be eliminated. The resulting state # expressions are sparse and void of any circular dependencies. 
- A = sp.zeros(len(cls_coefficients), len(ode_model._differential_states)) + A = sp.zeros( + len(cls_coefficients), len(ode_model._differential_states) + ) for i_cl, (cl, coefficients) in enumerate( zip(cls_state_idxs, cls_coefficients) ): @@ -1989,7 +2091,9 @@ def _get_conservation_laws_demartino( ) return raw_cls - def _get_conservation_laws_rref(self) -> List[Tuple[int, List[int], List[float]]]: + def _get_conservation_laws_rref( + self, + ) -> List[Tuple[int, List[int], List[float]]]: """Identify conservation laws based on left nullspace of the stoichiometric matrix, computed through (numeric) Gaussian elimination @@ -2006,7 +2110,9 @@ def _get_conservation_laws_rref(self) -> List[Tuple[int, List[int], List[float]] try: S = np.asarray( - self.stoichiometric_matrix[: len(self.symbols[SymbolId.SPECIES]), :], + self.stoichiometric_matrix[ + : len(self.symbols[SymbolId.SPECIES]), : + ], dtype=float, ) except TypeError: @@ -2202,10 +2308,15 @@ def _replace_in_all_expressions( if old not in self.symbols[symbol]: continue self.symbols[symbol] = { - smart_subs(k, old, new): v for k, v in self.symbols[symbol].items() + smart_subs(k, old, new): v + for k, v in self.symbols[symbol].items() } - for symbol in [SymbolId.OBSERVABLE, SymbolId.LLHY, SymbolId.SIGMAY]: + for symbol in [ + SymbolId.OBSERVABLE, + SymbolId.LLHY, + SymbolId.SIGMAY, + ]: if old not in self.symbols[symbol]: continue self.symbols[symbol][new] = self.symbols[symbol][old] @@ -2238,7 +2349,9 @@ def _replace_in_all_expressions( **self.symbols[SymbolId.SPECIES], **self.symbols[SymbolId.ALGEBRAIC_STATE], }.values(): - state["init"] = smart_subs(state["init"], old, self._make_initial(new)) + state["init"] = smart_subs( + state["init"], old, self._make_initial(new) + ) if "dt" in state: state["dt"] = smart_subs(state["dt"], old, new) @@ -2299,7 +2412,8 @@ def _sympy_from_sbml_math( try: try: formula = sp.sympify( - _parse_logical_operators(math_string), locals=self._local_symbols + _parse_logical_operators(math_string), + locals=self._local_symbols, ) except TypeError as err: if str(err) == "BooleanAtom not allowed in this context.": @@ -2322,10 +2436,14 @@ def _sympy_from_sbml_math( if isinstance(formula, sp.Expr): formula = _parse_special_functions_sbml(formula) - _check_unsupported_functions_sbml(formula, expression_type=ele_name) + _check_unsupported_functions_sbml( + formula, expression_type=ele_name + ) return formula - def _get_element_initial_assignment(self, element_id: str) -> Union[sp.Expr, None]: + def _get_element_initial_assignment( + self, element_id: str + ) -> Union[sp.Expr, None]: """ Extract value of sbml variable according to its initial assignment @@ -2420,7 +2538,8 @@ def _check_lib_sbml_errors( error = sbml_doc.getError(i_error) # we ignore any info messages for now if error.getSeverity() >= sbml.LIBSBML_SEV_ERROR or ( - show_warnings and error.getSeverity() >= sbml.LIBSBML_SEV_WARNING + show_warnings + and error.getSeverity() >= sbml.LIBSBML_SEV_WARNING ): logger.error( f"libSBML {error.getCategoryAsString()} " @@ -2429,7 +2548,9 @@ def _check_lib_sbml_errors( ) if num_error + num_fatal: - raise SBMLException("SBML Document failed to load (see error messages above)") + raise SBMLException( + "SBML Document failed to load (see error messages above)" + ) def _parse_event_trigger(trigger: sp.Expr) -> sp.Expr: @@ -2450,13 +2571,17 @@ def _parse_event_trigger(trigger: sp.Expr) -> sp.Expr: # convert relational expressions into trigger functions if isinstance( - trigger, (sp.core.relational.LessThan, 
sp.core.relational.StrictLessThan) + trigger, + (sp.core.relational.LessThan, sp.core.relational.StrictLessThan), ): # y < x or y <= x return -root if isinstance( trigger, - (sp.core.relational.GreaterThan, sp.core.relational.StrictGreaterThan), + ( + sp.core.relational.GreaterThan, + sp.core.relational.StrictGreaterThan, + ), ): # y >= x or y > x return root @@ -2676,7 +2801,9 @@ def _check_unsupported_functions_sbml( raise SBMLException(str(err)) -def _parse_special_functions_sbml(sym: sp.Expr, toplevel: bool = True) -> sp.Expr: +def _parse_special_functions_sbml( + sym: sp.Expr, toplevel: bool = True +) -> sp.Expr: try: return _parse_special_functions(sym, toplevel) except RuntimeError as err: diff --git a/python/sdist/amici/sbml_utils.py b/python/sdist/amici/sbml_utils.py index cce2a6c4fa..66c9d01bbc 100644 --- a/python/sdist/amici/sbml_utils.py +++ b/python/sdist/amici/sbml_utils.py @@ -161,7 +161,9 @@ def add_species( ) compartment_id = compartments[0].getId() elif not model.getCompartment(compartment_id): - raise SbmlMissingComponentIdError(f"No compartment with ID {compartment_id}.") + raise SbmlMissingComponentIdError( + f"No compartment with ID {compartment_id}." + ) sp = model.createSpecies() if sp.setIdAttribute(species_id) != libsbml.LIBSBML_OPERATION_SUCCESS: @@ -532,6 +534,8 @@ def _parse_logical_operators( return math_str if " xor(" in math_str or " Xor(" in math_str: - raise SBMLException("Xor is currently not supported as logical " "operation.") + raise SBMLException( + "Xor is currently not supported as logical " "operation." + ) return (math_str.replace("&&", "&")).replace("||", "|") diff --git a/python/sdist/amici/splines.py b/python/sdist/amici/splines.py index bb82b692c6..fdb0912045 100644 --- a/python/sdist/amici/splines.py +++ b/python/sdist/amici/splines.py @@ -11,7 +11,17 @@ if TYPE_CHECKING: from numbers import Real - from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union + from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Sequence, + Set, + Tuple, + Union, + ) from . import sbml_import @@ -112,19 +122,25 @@ def __init__( stop = sp.nsimplify(sp.sympify(stop)) if step is None: if number_of_nodes is None: - raise ValueError("One of step/number_of_nodes must be specified!") + raise ValueError( + "One of step/number_of_nodes must be specified!" + ) if not isinstance(number_of_nodes, Integral): raise TypeError("Length must be an integer!") if number_of_nodes < 2: raise ValueError("Length must be at least 2!") step = (stop - start) / (number_of_nodes - 1) elif number_of_nodes is not None: - raise ValueError("Only one of step/number_of_nodes can be specified!") + raise ValueError( + "Only one of step/number_of_nodes can be specified!" + ) else: step = sp.nsimplify(sp.sympify(step)) if start > stop: - raise ValueError(f"Start point {start} greater than stop point {stop}!") + raise ValueError( + f"Start point {start} greater than stop point {stop}!" + ) if step <= 0: raise ValueError(f"Step size {step} must be strictly positive!") @@ -181,7 +197,8 @@ def __array__(self, dtype=None) -> np.ndarray: def __repr__(self) -> str: return ( - f"UniformGrid(start={self.start}, stop={self.stop}, " f"step={self.step})" + f"UniformGrid(start={self.start}, stop={self.stop}, " + f"step={self.step})" ) @@ -301,7 +318,10 @@ def __init__( if not isinstance(evaluate_at, sp.Basic): # It may still be e.g. a list! 
raise ValueError(f"Invalid evaluate_at = {evaluate_at}!") - if evaluate_at != amici_time_symbol and evaluate_at != sbml_time_symbol: + if ( + evaluate_at != amici_time_symbol + and evaluate_at != sbml_time_symbol + ): logger.warning( "At the moment AMICI only supports evaluate_at = (model time). " "Annotations with correct piecewise MathML formulas " @@ -311,7 +331,9 @@ def __init__( if not isinstance(nodes, UniformGrid): nodes = np.asarray([sympify_noeval(x) for x in nodes]) - values_at_nodes = np.asarray([sympify_noeval(y) for y in values_at_nodes]) + values_at_nodes = np.asarray( + [sympify_noeval(y) for y in values_at_nodes] + ) if len(nodes) != len(values_at_nodes): raise ValueError( @@ -333,7 +355,10 @@ def __init__( ) bc, extrapolate = self._normalize_bc_and_extrapolate(bc, extrapolate) - if bc == ("periodic", "periodic") and values_at_nodes[0] != values_at_nodes[-1]: + if ( + bc == ("periodic", "periodic") + and values_at_nodes[0] != values_at_nodes[-1] + ): raise ValueError( "If the spline is to be periodic, " "the first and last elements of values_at_nodes must be equal!" @@ -554,15 +579,21 @@ def check_if_valid(self, importer: sbml_import.SbmlImporter) -> None: fixed_parameters: List[sp.Symbol] = list( importer.symbols[SymbolId.FIXED_PARAMETER].keys() ) - species: List[sp.Symbol] = list(importer.symbols[SymbolId.SPECIES].keys()) + species: List[sp.Symbol] = list( + importer.symbols[SymbolId.SPECIES].keys() + ) for x in self.nodes: if not x.free_symbols.issubset(fixed_parameters): - raise ValueError("nodes should only depend on constant parameters!") + raise ValueError( + "nodes should only depend on constant parameters!" + ) for y in self.values_at_nodes: if y.free_symbols.intersection(species): - raise ValueError("values_at_nodes should not depend on model species!") + raise ValueError( + "values_at_nodes should not depend on model species!" + ) fixed_parameters_values = [ importer.symbols[SymbolId.FIXED_PARAMETER][fp]["value"] @@ -575,7 +606,9 @@ def check_if_valid(self, importer: sbml_import.SbmlImporter) -> None: if not np.all(np.diff(nodes_values) >= 0): raise ValueError("nodes should be strictly increasing!") - def poly(self, i: Integral, *, x: Union[Real, sp.Basic] = None) -> sp.Basic: + def poly( + self, i: Integral, *, x: Union[Real, sp.Basic] = None + ) -> sp.Basic: """ Get the polynomial interpolant on the ``(nodes[i], nodes[i+1])`` interval. 
The polynomial is written in Horner form with respect to the scaled @@ -623,7 +656,9 @@ def poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic: return self._poly_variable(x, i) @abstractmethod - def _poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic: + def _poly_variable( + self, x: Union[Real, sp.Basic], i: Integral + ) -> sp.Basic: """This function (and not poly_variable) should be implemented by the subclasses""" raise NotImplementedError() @@ -776,13 +811,17 @@ def _formula( x = self._to_base_interval(x) extr_left, extr_right = None, None else: - extr_left, extr_right = self._extrapolation_formulas(x, extrapolate) + extr_left, extr_right = self._extrapolation_formulas( + x, extrapolate + ) if extr_left is not None: pieces.append((extr_left, x < self.nodes[0])) for i in range(len(self.nodes) - 2): - pieces.append((self.segment_formula(i, x=x), x < self.nodes[i + 1])) + pieces.append( + (self.segment_formula(i, x=x), x < self.nodes[i + 1]) + ) if extr_right is not None: pieces.append((self.segment_formula(-1, x=x), x < self.nodes[-1])) @@ -818,7 +857,9 @@ def _to_base_interval( """For periodic splines, maps the real point `x` to the reference period.""" if self.bc != ("periodic", "periodic"): - raise ValueError("_to_base_interval makes no sense with non-periodic bc") + raise ValueError( + "_to_base_interval makes no sense with non-periodic bc" + ) xA = self.nodes[0] xB = self.nodes[-1] @@ -861,7 +902,9 @@ def squared_L2_norm_of_curvature(self) -> sp.Basic: integral = sp.sympify(0) for i in range(len(self.nodes) - 1): formula = self.poly(i, x=x).diff(x, 2) ** 2 - integral += sp.integrate(formula, (x, self.nodes[i], self.nodes[i + 1])) + integral += sp.integrate( + formula, (x, self.nodes[i], self.nodes[i + 1]) + ) return sp.simplify(integral) def integrate( @@ -892,7 +935,9 @@ def integrate( return formula.integrate((x, z0, z1)) if k0 + 1 == k1: - return formula.integrate((x, z0, xB)) + formula.integrate((x, xA, z1)) + return formula.integrate((x, z0, xB)) + formula.integrate( + (x, xA, z1) + ) return ( formula.integrate((x, z0, xB)) @@ -969,7 +1014,9 @@ def _annotation_children(self) -> Dict[str, Union[str, List[str]]]: assert amici_time_symbol not in x.free_symbols children["spline_grid"] = [sbml_mathml(x) for x in self.nodes] - children["spline_values"] = [sbml_mathml(y) for y in self.values_at_nodes] + children["spline_values"] = [ + sbml_mathml(y) for y in self.values_at_nodes + ] return children @@ -1032,10 +1079,12 @@ def add_to_sbml_model( # Autoadd parameters if auto_add is True or auto_add == "spline": - if not model.getParameter(str(self.sbml_id)) and not model.getSpecies( + if not model.getParameter( str(self.sbml_id) - ): - add_parameter(model, self.sbml_id, constant=False, units=y_units) + ) and not model.getSpecies(str(self.sbml_id)): + add_parameter( + model, self.sbml_id, constant=False, units=y_units + ) if auto_add is True: if isinstance(x_nominal, collections.abc.Sequence): @@ -1046,7 +1095,9 @@ def add_to_sbml_model( ) for i in range(len(x_nominal) - 1): if x[i] >= x[i + 1]: - raise ValueError("x_nominal must be strictly increasing!") + raise ValueError( + "x_nominal must be strictly increasing!" 
+ ) elif x_nominal is None: x_nominal = len(self.nodes) * [None] else: @@ -1055,7 +1106,9 @@ def add_to_sbml_model( raise TypeError("x_nominal must be a Sequence!") for _x, _val in zip(self.nodes, x_nominal): if _x.is_Symbol and not model.getParameter(_x.name): - add_parameter(model, _x.name, value=_val, units=x_units) + add_parameter( + model, _x.name, value=_val, units=x_units + ) if isinstance(y_nominal, collections.abc.Sequence): if len(y_nominal) != len(self.values_at_nodes): @@ -1109,7 +1162,9 @@ def add_to_sbml_model( k = sp.Piecewise((3, sp.cos(s) < 0), (1, True)) formula = x0 + T * (sp.atan(sp.tan(s)) / (2 * sp.pi) + k / 4) assert amici_time_symbol not in formula.free_symbols - par = add_parameter(model, parameter_id, constant=False, units=x_units) + par = add_parameter( + model, parameter_id, constant=False, units=x_units + ) retcode = par.setAnnotation( f'' ) @@ -1117,13 +1172,17 @@ def add_to_sbml_model( raise SbmlAnnotationError("Could not set SBML annotation!") add_assignment_rule(model, parameter_id, formula) - def _replace_in_all_expressions(self, old: sp.Symbol, new: sp.Symbol) -> None: + def _replace_in_all_expressions( + self, old: sp.Symbol, new: sp.Symbol + ) -> None: if self.sbml_id == old: self._sbml_id = new self._x = self.evaluate_at.subs(old, new) if not isinstance(self.nodes, UniformGrid): self._nodes = [x.subs(old, new) for x in self.nodes] - self._values_at_nodes = [y.subs(old, new) for y in self.values_at_nodes] + self._values_at_nodes = [ + y.subs(old, new) for y in self.values_at_nodes + ] @staticmethod def is_spline(rule: libsbml.AssignmentRule) -> bool: @@ -1135,7 +1194,9 @@ def is_spline(rule: libsbml.AssignmentRule) -> bool: return AbstractSpline.get_annotation(rule) is not None @staticmethod - def get_annotation(rule: libsbml.AssignmentRule) -> Union[ET.Element, None]: + def get_annotation( + rule: libsbml.AssignmentRule, + ) -> Union[ET.Element, None]: """ Extract AMICI spline annotation from an SBML assignment rule (given as a :py:class:`libsbml.AssignmentRule` object). @@ -1167,7 +1228,9 @@ def from_annotation( must be hard-coded into this function here (at the moment). """ if annotation.tag != f"{{{annotation_namespace}}}spline": - raise ValueError("The given annotation is not an AMICI spline annotation!") + raise ValueError( + "The given annotation is not an AMICI spline annotation!" + ) attributes = {} for key, value in annotation.items(): @@ -1203,14 +1266,17 @@ def from_annotation( if attributes["spline_method"] == "cubic_hermite": cls = CubicHermiteSpline else: - raise ValueError(f"Unknown spline method {attributes['spline_method']}!") + raise ValueError( + f"Unknown spline method {attributes['spline_method']}!" + ) del attributes["spline_method"] kwargs = cls._from_annotation(attributes, children) if attributes: raise ValueError( - "Unprocessed attributes in spline annotation!\n" + str(attributes) + "Unprocessed attributes in spline annotation!\n" + + str(attributes) ) if children: @@ -1281,7 +1347,9 @@ def _from_annotation( ) if "spline_values" not in children: - raise ValueError("Required spline annotation 'spline_values' missing!") + raise ValueError( + "Required spline annotation 'spline_values' missing!" 
+ ) kwargs["values_at_nodes"] = children.pop("spline_values") return kwargs @@ -1300,7 +1368,9 @@ def _parameters(self) -> Set[sp.Symbol]: parameters.update(y.free_symbols) return parameters - def ode_model_symbol(self, importer: sbml_import.SbmlImporter) -> sp.Function: + def ode_model_symbol( + self, importer: sbml_import.SbmlImporter + ) -> sp.Function: """ Returns the `sympy` object to be used by :py:class:`amici.de_export.ODEModel`. @@ -1368,7 +1438,10 @@ def _eval_is_real(self): return True return AmiciSplineSensitivity( - self.args[0], self.args[1], parameters[pindex], *self.args[2:] + self.args[0], + self.args[1], + parameters[pindex], + *self.args[2:], ) def _eval_is_real(self): @@ -1397,9 +1470,13 @@ def plot( nodes = np.asarray(self.nodes) xlim = (float(nodes[0]), float(nodes[-1])) nodes = np.linspace(*xlim, npoints) - ax.plot(nodes, [float(self.evaluate(x).subs(parameters)) for x in nodes]) ax.plot( - self.nodes, [float(y.subs(parameters)) for y in self.values_at_nodes], "o" + nodes, [float(self.evaluate(x).subs(parameters)) for x in nodes] + ) + ax.plot( + self.nodes, + [float(y.subs(parameters)) for y in self.values_at_nodes], + "o", ) if xlabel is not None: ax.set_xlabel(xlabel) @@ -1500,7 +1577,9 @@ def __init__( if not isinstance(nodes, UniformGrid): nodes = np.asarray([sympify_noeval(x) for x in nodes]) - values_at_nodes = np.asarray([sympify_noeval(y) for y in values_at_nodes]) + values_at_nodes = np.asarray( + [sympify_noeval(y) for y in values_at_nodes] + ) if len(nodes) != len(values_at_nodes): # NB this would be checked in AbstractSpline.__init__() @@ -1512,13 +1591,19 @@ def __init__( ) bc, extrapolate = self._normalize_bc_and_extrapolate(bc, extrapolate) - if bc[0] == "zeroderivative+natural" or bc[1] == "zeroderivative+natural": + if ( + bc[0] == "zeroderivative+natural" + or bc[1] == "zeroderivative+natural" + ): raise ValueError( - "zeroderivative+natural bc not supported by " "CubicHermiteSplines!" + "zeroderivative+natural bc not supported by " + "CubicHermiteSplines!" ) if derivatives_at_nodes is None: - derivatives_at_nodes = _finite_differences(nodes, values_at_nodes, bc) + derivatives_at_nodes = _finite_differences( + nodes, values_at_nodes, bc + ) self._derivatives_by_fd = True else: derivatives_at_nodes = np.asarray( @@ -1603,7 +1688,9 @@ def d_scaled(self, i: Integral) -> sp.Expr: return self.derivatives_at_nodes[i] / self.values_at_nodes[i] return self.derivatives_at_nodes[i] - def _poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic: + def _poly_variable( + self, x: Union[Real, sp.Basic], i: Integral + ) -> sp.Basic: assert 0 <= i < len(self.nodes) - 1 dx = self.nodes[i + 1] - self.nodes[i] with evaluate(False): @@ -1645,7 +1732,9 @@ def _parameters(self) -> Set[sp.Symbol]: parameters.update(d.free_symbols) return parameters - def _replace_in_all_expressions(self, old: sp.Symbol, new: sp.Symbol) -> None: + def _replace_in_all_expressions( + self, old: sp.Symbol, new: sp.Symbol + ) -> None: super()._replace_in_all_expressions(old, new) self._derivatives_at_nodes = [ d.subs(old, new) for d in self.derivatives_at_nodes @@ -1680,7 +1769,9 @@ def __str__(self) -> str: return s + " [" + ", ".join(cmps) + "]" -def _finite_differences(xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC) -> np.ndarray: +def _finite_differences( + xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC +) -> np.ndarray: dd = [] if bc[0] == "periodic": @@ -1701,7 +1792,11 @@ def _finite_differences(xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC) -> np. 
for i in range(1, len(xx) - 1): dd.append( _centered_fd( - yy[i - 1], yy[i], yy[i + 1], xx[i] - xx[i - 1], xx[i + 1] - xx[i] + yy[i - 1], + yy[i], + yy[i + 1], + xx[i] - xx[i - 1], + xx[i + 1] - xx[i], ) ) @@ -1715,7 +1810,9 @@ def _finite_differences(xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC) -> np. "At least 3 nodes are needed " "for computing finite differences with natural bc!" ) - fd = _natural_fd(yy[-1], xx[-2] - xx[-1], yy[-2], xx[-3] - xx[-2], yy[-3]) + fd = _natural_fd( + yy[-1], xx[-2] - xx[-1], yy[-2], xx[-3] - xx[-2], yy[-3] + ) else: fd = _onesided_fd(yy[-2], yy[-1], xx[-1] - xx[-2]) dd.append(fd) diff --git a/python/sdist/amici/swig.py b/python/sdist/amici/swig.py index bfb2964a3a..ef75646389 100644 --- a/python/sdist/amici/swig.py +++ b/python/sdist/amici/swig.py @@ -16,21 +16,29 @@ class TypeHintFixer(ast.NodeTransformer): "size_t": ast.Name("int"), "bool": ast.Name("bool"), "std::unique_ptr< amici::Solver >": ast.Constant("Solver"), - "amici::InternalSensitivityMethod": ast.Constant("InternalSensitivityMethod"), + "amici::InternalSensitivityMethod": ast.Constant( + "InternalSensitivityMethod" + ), "amici::InterpolationType": ast.Constant("InterpolationType"), "amici::LinearMultistepMethod": ast.Constant("LinearMultistepMethod"), "amici::LinearSolver": ast.Constant("LinearSolver"), "amici::Model *": ast.Constant("Model"), "amici::Model const *": ast.Constant("Model"), - "amici::NewtonDampingFactorMode": ast.Constant("NewtonDampingFactorMode"), - "amici::NonlinearSolverIteration": ast.Constant("NonlinearSolverIteration"), + "amici::NewtonDampingFactorMode": ast.Constant( + "NewtonDampingFactorMode" + ), + "amici::NonlinearSolverIteration": ast.Constant( + "NonlinearSolverIteration" + ), "amici::ObservableScaling": ast.Constant("ObservableScaling"), "amici::ParameterScaling": ast.Constant("ParameterScaling"), "amici::RDataReporting": ast.Constant("RDataReporting"), "amici::SensitivityMethod": ast.Constant("SensitivityMethod"), "amici::SensitivityOrder": ast.Constant("SensitivityOrder"), "amici::Solver *": ast.Constant("Solver"), - "amici::SteadyStateSensitivityMode": ast.Constant("SteadyStateSensitivityMode"), + "amici::SteadyStateSensitivityMode": ast.Constant( + "SteadyStateSensitivityMode" + ), "amici::realtype": ast.Name("float"), "DoubleVector": ast.Constant("Sequence[float]"), "IntVector": ast.Name("Sequence[int]"), @@ -72,7 +80,9 @@ def _new_annot(self, old_annot: str): # std::vector value type if ( value_type := re.sub( - r"std::vector< (.*) >::value_type(?: const &)?", r"\1", old_annot + r"std::vector< (.*) >::value_type(?: const &)?", + r"\1", + old_annot, ) ) in self.mapping: return self.mapping[value_type] diff --git a/python/sdist/amici/swig_wrappers.py b/python/sdist/amici/swig_wrappers.py index 50e78daf39..f56f3bd5d2 100644 --- a/python/sdist/amici/swig_wrappers.py +++ b/python/sdist/amici/swig_wrappers.py @@ -84,7 +84,9 @@ def _get_ptr( def runAmiciSimulation( - model: AmiciModel, solver: AmiciSolver, edata: Optional[AmiciExpData] = None + model: AmiciModel, + solver: AmiciSolver, + edata: Optional[AmiciExpData] = None, ) -> "numpy.ReturnDataView": """ Convenience wrapper around :py:func:`amici.amici.runAmiciSimulation` @@ -105,7 +107,8 @@ def runAmiciSimulation( """ if ( model.ne > 0 - and solver.getSensitivityMethod() == amici_swig.SensitivityMethod.adjoint + and solver.getSensitivityMethod() + == amici_swig.SensitivityMethod.adjoint and solver.getSensitivityOrder() == amici_swig.SensitivityOrder.first ): warnings.warn( @@ -166,7 +169,8 @@ def 
runAmiciSimulations( """ if ( model.ne > 0 - and solver.getSensitivityMethod() == amici_swig.SensitivityMethod.adjoint + and solver.getSensitivityMethod() + == amici_swig.SensitivityMethod.adjoint and solver.getSensitivityOrder() == amici_swig.SensitivityOrder.first ): warnings.warn( @@ -309,7 +313,9 @@ def _log_simulation(rdata: amici_swig.ReturnData): ) -def _ids_and_names_to_rdata(rdata: amici_swig.ReturnData, model: amici_swig.Model): +def _ids_and_names_to_rdata( + rdata: amici_swig.ReturnData, model: amici_swig.Model +): """Copy entity IDs and names from a Model to ReturnData.""" for entity_type in ( "State", diff --git a/python/sdist/pyproject.toml b/python/sdist/pyproject.toml index 3e77875ca1..011064fbdb 100644 --- a/python/sdist/pyproject.toml +++ b/python/sdist/pyproject.toml @@ -14,4 +14,4 @@ requires = [ build-backend = "setuptools.build_meta" [tool.black] -line-length = 80 +line-length = 79 diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 1c07ddacac..9ab64b91d7 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -41,7 +41,9 @@ def sbml_example_presimulation_module(): constant_parameters=constant_parameters, ) - yield amici.import_model_module(module_name=module_name, module_path=outdir) + yield amici.import_model_module( + module_name=module_name, module_path=outdir + ) @pytest.fixture(scope="session") diff --git a/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py b/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py index 4723d3ac36..767c239c5d 100644 --- a/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py +++ b/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py @@ -76,7 +76,9 @@ ) # Transphosphorylation of EGFR by RTK -Rule("egfr_transphos", EGFR(CR1=ANY, Y1068="U") >> EGFR(CR1=ANY, Y1068="P"), kp3) +Rule( + "egfr_transphos", EGFR(CR1=ANY, Y1068="U") >> EGFR(CR1=ANY, Y1068="P"), kp3 +) # Dephosphorylation Rule("egfr_dephos", EGFR(Y1068="P") >> EGFR(Y1068="U"), km3) diff --git a/python/tests/splines_utils.py b/python/tests/splines_utils.py index e1c0c4352c..0746207ddb 100644 --- a/python/tests/splines_utils.py +++ b/python/tests/splines_utils.py @@ -267,7 +267,9 @@ def create_petab_problem( problem.to_files( model_file=os.path.join(folder, f"{model_name}_model.xml"), condition_file=os.path.join(folder, f"{model_name}_conditions.tsv"), - measurement_file=os.path.join(folder, f"{model_name}_measurements.tsv"), + measurement_file=os.path.join( + folder, f"{model_name}_measurements.tsv" + ), parameter_file=os.path.join(folder, f"{model_name}_parameters.tsv"), observable_file=os.path.join(folder, f"{model_name}_observables.tsv"), yaml_file=os.path.join(folder, f"{model_name}.yaml"), @@ -370,15 +372,24 @@ def simulate_splines( ) if petab_problem is None and amici_model is not None: - raise ValueError("if amici_model is given, petab_problem must be given too") + raise ValueError( + "if amici_model is given, petab_problem must be given too" + ) if petab_problem is not None and initial_values is None: - raise ValueError("if petab_problem is given, initial_values must be given too") + raise ValueError( + "if petab_problem is given, initial_values must be given too" + ) if petab_problem is None: # Create PEtab problem path, initial_values, T = create_petab_problem( - splines, params_true, initial_values, sigma=0.0, folder=folder, **kwargs + splines, + params_true, + initial_values, + sigma=0.0, + folder=folder, + **kwargs, ) petab_problem = petab.Problem.from_yaml(path) @@ 
-462,14 +473,18 @@ def simulate_splines( ) -def compute_ground_truth(splines, initial_values, times, params_true, params_sorted): +def compute_ground_truth( + splines, initial_values, times, params_true, params_sorted +): x_true_sym = sp.Matrix( [ integrate_spline(spline, None, times, iv) for (spline, iv) in zip(splines, initial_values) ] ).transpose() - groundtruth = {"x_true": np.asarray(x_true_sym.subs(params_true), dtype=float)} + groundtruth = { + "x_true": np.asarray(x_true_sym.subs(params_true), dtype=float) + } sx_by_state = [ x_true_sym[:, i].jacobian(params_sorted).subs(params_true) for i in range(x_true_sym.shape[1]) @@ -567,7 +582,9 @@ def check_splines( # Sort splines/ics/parameters as in the AMICI model splines = [splines[species_to_index(name)] for name in state_ids] - initial_values = [initial_values[species_to_index(name)] for name in state_ids] + initial_values = [ + initial_values[species_to_index(name)] for name in state_ids + ] def param_by_name(id): for p in params_true.keys(): @@ -667,7 +684,9 @@ def param_by_name(id): ) elif debug == "print": sx_err_abs = abs(rdata["sx"] - sx_true) - sx_err_rel = np.where(sx_err_abs == 0, 0, sx_err_abs / abs(sx_true)) + sx_err_rel = np.where( + sx_err_abs == 0, 0, sx_err_abs / abs(sx_true) + ) print(f"sx_atol={sx_atol} sx_rtol={sx_rtol}") print("sx_err_abs:") print(np.squeeze(sx_err_abs)) @@ -696,7 +715,9 @@ def param_by_name(id): if sllh_atol is None: sllh_atol = np.finfo(float).eps sllh_err_abs = abs(sllh).max() - if (sllh_err_abs > sllh_atol and debug is not True) or debug == "print": + if ( + sllh_err_abs > sllh_atol and debug is not True + ) or debug == "print": print(f"sllh_atol={sllh_atol}") print(f"sllh_err_abs = {sllh_err_abs}") if not debug: @@ -705,7 +726,11 @@ def param_by_name(id): assert sllh is None # Try different parameter lists - if not skip_sensitivity and (not use_adjoint) and parameter_lists is not None: + if ( + not skip_sensitivity + and (not use_adjoint) + and parameter_lists is not None + ): for plist in parameter_lists: amici_model.setParameterList(plist) amici_model.setTimepoints(rdata.t) @@ -884,7 +909,11 @@ def example_spline_1( params[yy[i]] = yy_true[i] spline = CubicHermiteSpline( - f"y{idx}", nodes=xx, values_at_nodes=yy, bc=None, extrapolate=extrapolate + f"y{idx}", + nodes=xx, + values_at_nodes=yy, + bc=None, + extrapolate=extrapolate, ) if os.name == "nt": @@ -911,7 +940,11 @@ def example_spline_2(idx: int = 0): yy.append(yy[0]) params = dict(zip(yy, yy_true)) spline = CubicHermiteSpline( - f"y{idx}", nodes=xx, values_at_nodes=yy, bc="periodic", extrapolate="periodic" + f"y{idx}", + nodes=xx, + values_at_nodes=yy, + bc="periodic", + extrapolate="periodic", ) tols = ( dict(llh_rtol=1e-15), diff --git a/python/tests/test_compare_conservation_laws_sbml.py b/python/tests/test_compare_conservation_laws_sbml.py index 79a26fd948..4d6a453b52 100644 --- a/python/tests/test_compare_conservation_laws_sbml.py +++ b/python/tests/test_compare_conservation_laws_sbml.py @@ -28,7 +28,9 @@ def edata_fixture(): 2, 0, 0, - np.array([0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, float("inf"), float("inf")]), + np.array( + [0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, float("inf"), float("inf")] + ), ) edata_full.setObservedData([3.14] * 18) edata_full.fixedParameters = np.array([1.0, 2.0]) @@ -129,7 +131,9 @@ def test_compare_conservation_laws_sbml(models, edata_fixture): assert model_without_cl.nx_rdata == model_with_cl.nx_rdata assert model_with_cl.nx_solver < model_without_cl.nx_solver assert len(model_with_cl.getStateIdsSolver()) == 
model_with_cl.nx_solver - assert len(model_without_cl.getStateIdsSolver()) == model_without_cl.nx_solver + assert ( + len(model_without_cl.getStateIdsSolver()) == model_without_cl.nx_solver + ) # ----- compare simulations wo edata, sensi = 0, states ------------------ # run simulations @@ -140,7 +144,11 @@ def test_compare_conservation_laws_sbml(models, edata_fixture): # compare state trajectories assert_allclose( - rdata["x"], rdata_cl["x"], rtol=1.0e-5, atol=1.0e-8, err_msg="rdata.x mismatch" + rdata["x"], + rdata_cl["x"], + rtol=1.0e-5, + atol=1.0e-8, + err_msg="rdata.x mismatch", ) # ----- compare simulations wo edata, sensi = 1, states and sensis ------- @@ -254,9 +262,15 @@ def test_adjoint_pre_and_post_equilibration(models, edata_fixture): ) # assert all are close - assert_allclose(rff_cl["sllh"], rfa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8) - assert_allclose(rfa_cl["sllh"], raa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8) - assert_allclose(raa_cl["sllh"], rff_cl["sllh"], rtol=1.0e-5, atol=1.0e-8) + assert_allclose( + rff_cl["sllh"], rfa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8 + ) + assert_allclose( + rfa_cl["sllh"], raa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8 + ) + assert_allclose( + raa_cl["sllh"], rff_cl["sllh"], rtol=1.0e-5, atol=1.0e-8 + ) # compare fully adjoint approach to simulation with singular # Jacobian diff --git a/python/tests/test_conserved_quantities_demartino.py b/python/tests/test_conserved_quantities_demartino.py index 3c67d0145a..339743cc4e 100644 --- a/python/tests/test_conserved_quantities_demartino.py +++ b/python/tests/test_conserved_quantities_demartino.py @@ -7,7 +7,9 @@ import sympy as sp from amici.conserved_quantities_demartino import _fill, _kernel from amici.conserved_quantities_demartino import _output as output -from amici.conserved_quantities_demartino import compute_moiety_conservation_laws +from amici.conserved_quantities_demartino import ( + compute_moiety_conservation_laws, +) from amici.logging import get_logger, log_execution_time from amici.testing import skip_on_valgrind @@ -165,7 +167,8 @@ def data_demartino2014(): S = [ int(item) for sl in [ - entry.decode("ascii").strip().split("\t") for entry in data.readlines() + entry.decode("ascii").strip().split("\t") + for entry in data.readlines() ] for item in sl ] @@ -175,7 +178,9 @@ def data_demartino2014(): r"https://github.com/AMICI-dev/AMICI/files/11430970/test-ecoli-met.txt", timeout=10, ) - row_names = [entry.decode("ascii").strip() for entry in io.BytesIO(response.read())] + row_names = [ + entry.decode("ascii").strip() for entry in io.BytesIO(response.read()) + ] return S, row_names @@ -192,7 +197,9 @@ def test_kernel_demartino2014(data_demartino2014, quiet=True): ), "Unexpected dimension of stoichiometric matrix" # Expected number of metabolites per conservation law found after kernel() - expected_num_species = [53] + [2] * 11 + [6] + [3] * 2 + [2] * 15 + [3] + [2] * 5 + expected_num_species = ( + [53] + [2] * 11 + [6] + [3] * 2 + [2] * 15 + [3] + [2] * 5 + ) ( kernel_dim, @@ -220,7 +227,9 @@ def test_kernel_demartino2014(data_demartino2014, quiet=True): assert ( engaged_species == demartino2014_kernel_engaged_species ), "Wrong engaged metabolites reported" - assert len(conserved_moieties) == 128, "Wrong number of conserved moieties reported" + assert ( + len(conserved_moieties) == 128 + ), "Wrong number of conserved moieties reported" # Assert that each conserved moiety has the correct number of metabolites for i in range(int_kernel_dim - 2): @@ -768,7 +777,9 @@ def 
test_fill_demartino2014(data_demartino2014): assert not any(fields[len(ref_for_fields) :]) -def compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=False): +def compute_moiety_conservation_laws_demartino2014( + data_demartino2014, quiet=False +): """Compute conserved quantities for De Martino's published results for E. coli network""" stoichiometric_list, row_names = data_demartino2014 @@ -781,7 +792,9 @@ def compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=Fal start = perf_counter() cls_state_idxs, cls_coefficients = compute_moiety_conservation_laws( - stoichiometric_list, num_species=num_species, num_reactions=num_reactions + stoichiometric_list, + num_species=num_species, + num_reactions=num_reactions, ) runtime = perf_counter() - start if not quiet: @@ -795,7 +808,9 @@ def compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=Fal def test_compute_moiety_conservation_laws_demartino2014(data_demartino2014): """Invoke test case and benchmarking for De Martino's published results for E. coli network""" - compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=False) + compute_moiety_conservation_laws_demartino2014( + data_demartino2014, quiet=False + ) @skip_on_valgrind @@ -826,7 +841,9 @@ def test_compute_moiety_conservation_laws_simple(): stoichiometric_matrix = sp.Matrix( [[-1.0, 1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, -1.0]] ) - stoichiometric_list = [float(entry) for entry in stoichiometric_matrix.T.flat()] + stoichiometric_list = [ + float(entry) for entry in stoichiometric_matrix.T.flat() + ] num_tries = 1000 found_all_n_times = 0 diff --git a/python/tests/test_edata.py b/python/tests/test_edata.py index c2d2ea470b..9c4d9b9edc 100644 --- a/python/tests/test_edata.py +++ b/python/tests/test_edata.py @@ -16,7 +16,9 @@ def test_edata_sensi_unscaling(model_units_module): sx0 = (3, 3, 3, 3) - parameter_scales_log10 = [amici.ParameterScaling.log10.value] * len(parameters0) + parameter_scales_log10 = [amici.ParameterScaling.log10.value] * len( + parameters0 + ) amici_parameter_scales_log10 = amici.parameterScalingFromIntVector( parameter_scales_log10 ) diff --git a/python/tests/test_events.py b/python/tests/test_events.py index c562f3c4fc..d2a177bded 100644 --- a/python/tests/test_events.py +++ b/python/tests/test_events.py @@ -15,10 +15,15 @@ @pytest.fixture( params=[ pytest.param("events_plus_heavisides", marks=skip_on_valgrind), - pytest.param("piecewise_plus_event_simple_case", marks=skip_on_valgrind), - pytest.param("piecewise_plus_event_semi_complicated", marks=skip_on_valgrind), pytest.param( - "piecewise_plus_event_trigger_depends_on_state", marks=skip_on_valgrind + "piecewise_plus_event_simple_case", marks=skip_on_valgrind + ), + pytest.param( + "piecewise_plus_event_semi_complicated", marks=skip_on_valgrind + ), + pytest.param( + "piecewise_plus_event_trigger_depends_on_state", + marks=skip_on_valgrind, ), pytest.param("nested_events", marks=skip_on_valgrind), pytest.param("event_state_dep_ddeltax_dtpx", marks=skip_on_valgrind), @@ -72,7 +77,9 @@ def get_model_definition(model_name): if model_name == "event_state_dep_ddeltax_dtpx": return model_definition_event_state_dep_ddeltax_dtpx() - raise NotImplementedError(f"Model with name {model_name} is not implemented.") + raise NotImplementedError( + f"Model with name {model_name} is not implemented." 
+ ) def model_definition_events_plus_heavisides(): @@ -289,7 +296,9 @@ def x_expected(t, k1, k2, inflow_1, decay_1, decay_2, bolus): def get_early_x(t): # compute dynamics before event - x_1 = equil * (1 - np.exp(-decay_1 * t)) + k1 * np.exp(-decay_1 * t) + x_1 = equil * (1 - np.exp(-decay_1 * t)) + k1 * np.exp( + -decay_1 * t + ) x_2 = k2 * np.exp(-decay_2 * t) return np.array([[x_1], [x_2]]) @@ -303,9 +312,9 @@ def get_early_x(t): # compute dynamics after event inhom = np.exp(decay_1 * event_time) * tau_x1 - x_1 = equil * (1 - np.exp(decay_1 * (event_time - t))) + inhom * np.exp( - -decay_1 * t - ) + x_1 = equil * ( + 1 - np.exp(decay_1 * (event_time - t)) + ) + inhom * np.exp(-decay_1 * t) x_2 = tau_x2 * np.exp(decay_2 * event_time) * np.exp(-decay_2 * t) x = np.array([[x_1], [x_2]]) @@ -360,7 +369,11 @@ def model_definition_piecewise_plus_event_simple_case(): } timepoints = np.linspace(0.0, 5.0, 100) # np.array((0.0, 4.0,)) events = { - "event_1": {"trigger": "time > alpha", "target": "x_1", "assignment": "gamma"}, + "event_1": { + "trigger": "time > alpha", + "target": "x_1", + "assignment": "gamma", + }, "event_2": { "trigger": "time > beta", "target": "x_1", @@ -458,7 +471,8 @@ def x_expected(t, x_1_0, alpha, beta, gamma, delta): else: # after third event triggered x = ( - ((x_1_0 + alpha) * alpha + (beta - alpha)) * delta + (gamma - beta) + ((x_1_0 + alpha) * alpha + (beta - alpha)) * delta + + (gamma - beta) ) ** 2 * 2 + (t - gamma) return np.array((x,)) @@ -541,7 +555,9 @@ def x_expected(t, x_1_0, x_2_0, alpha, beta, gamma, delta, eta): x_1 = x_1_heaviside_1 * np.exp(delta * (t - heaviside_1)) else: x_1_heaviside_1 = gamma * np.exp(-(heaviside_1 - t_event_1)) - x_1_at_event_2 = x_1_heaviside_1 * np.exp(delta * (t_event_2 - heaviside_1)) + x_1_at_event_2 = x_1_heaviside_1 * np.exp( + delta * (t_event_2 - heaviside_1) + ) x_2_at_event_2 = x_2_0 * np.exp(-eta * t_event_2) x1_after_event_2 = x_1_at_event_2 + x_2_at_event_2 x_1 = x1_after_event_2 * np.exp(-(t - t_event_2)) @@ -666,8 +682,12 @@ def sx_expected(t, parameters): def test_models(model): amici_model, parameters, timepoints, x_expected, sx_expected = model - result_expected_x = np.array([x_expected(t, **parameters) for t in timepoints]) - result_expected_sx = np.array([sx_expected(t, parameters) for t in timepoints]) + result_expected_x = np.array( + [x_expected(t, **parameters) for t in timepoints] + ) + result_expected_sx = np.array( + [sx_expected(t, parameters) for t in timepoints] + ) # assert correctness of trajectories check_trajectories_without_sensitivities(amici_model, result_expected_x) diff --git a/python/tests/test_hdf5.py b/python/tests/test_hdf5.py index c47d8653eb..232f22be8c 100644 --- a/python/tests/test_hdf5.py +++ b/python/tests/test_hdf5.py @@ -32,7 +32,9 @@ def _modify_solver_attrs(solver): getattr(solver, attr)(cval) -@pytest.mark.skipif(not amici.hdf5_enabled, reason="AMICI was compiled without HDF5") +@pytest.mark.skipif( + not amici.hdf5_enabled, reason="AMICI was compiled without HDF5" +) def test_solver_hdf5_roundtrip(sbml_example_presimulation_module): """TestCase class for AMICI HDF5 I/O""" diff --git a/python/tests/test_heavisides.py b/python/tests/test_heavisides.py index 4cef7723a6..c3bea26a0c 100644 --- a/python/tests/test_heavisides.py +++ b/python/tests/test_heavisides.py @@ -53,8 +53,12 @@ def model(request): def test_models(model): amici_model, parameters, timepoints, x_expected, sx_expected = model - result_expected_x = np.array([x_expected(t, **parameters) for t in timepoints]) - 
result_expected_sx = np.array([sx_expected(t, **parameters) for t in timepoints]) + result_expected_x = np.array( + [x_expected(t, **parameters) for t in timepoints] + ) + result_expected_sx = np.array( + [sx_expected(t, **parameters) for t in timepoints] + ) # Does the AMICI simulation match the analytical solution? check_trajectories_without_sensitivities(amici_model, result_expected_x) @@ -71,7 +75,9 @@ def get_model_definition(model_name): elif model_name == "piecewise_many_conditions": return model_definition_piecewise_many_conditions() else: - raise NotImplementedError(f"Model with name {model_name} is not implemented.") + raise NotImplementedError( + f"Model with name {model_name} is not implemented." + ) def model_definition_state_and_parameter_dependent_heavisides(): @@ -136,8 +142,12 @@ def sx_expected(t, alpha, beta, gamma, delta, eta, zeta): sx_1_zeta = np.exp(alpha * t) else: # Never trust Wolfram Alpha... - sx_1_alpha = zeta * tau_1 * np.exp(alpha * tau_1 - beta * (t - tau_1)) - sx_1_beta = zeta * (tau_1 - t) * np.exp(alpha * tau_1 - beta * (t - tau_1)) + sx_1_alpha = ( + zeta * tau_1 * np.exp(alpha * tau_1 - beta * (t - tau_1)) + ) + sx_1_beta = ( + zeta * (tau_1 - t) * np.exp(alpha * tau_1 - beta * (t - tau_1)) + ) sx_1_gamma = ( zeta * (alpha + beta) @@ -176,8 +186,22 @@ def sx_expected(t, alpha, beta, gamma, delta, eta, zeta): sx_2_delta = gamma * np.exp(gamma * delta) - eta sx_2_eta = t - delta - sx_1 = (sx_1_alpha, sx_1_beta, sx_1_gamma, sx_1_delta, sx_1_eta, sx_1_zeta) - sx_2 = (sx_2_alpha, sx_2_beta, sx_2_gamma, sx_2_delta, sx_2_eta, sx_2_zeta) + sx_1 = ( + sx_1_alpha, + sx_1_beta, + sx_1_gamma, + sx_1_delta, + sx_1_eta, + sx_1_zeta, + ) + sx_2 = ( + sx_2_alpha, + sx_2_beta, + sx_2_gamma, + sx_2_delta, + sx_2_eta, + sx_2_zeta, + ) return np.array((sx_1, sx_2)).transpose() diff --git a/python/tests/test_misc.py b/python/tests/test_misc.py index 331c806623..5a88fda6f8 100644 --- a/python/tests/test_misc.py +++ b/python/tests/test_misc.py @@ -7,7 +7,11 @@ import amici import pytest import sympy as sp -from amici.de_export import _custom_pow_eval_derivative, _monkeypatched, smart_subs_dict +from amici.de_export import ( + _custom_pow_eval_derivative, + _monkeypatched, + smart_subs_dict, +) from amici.testing import skip_on_valgrind @@ -71,7 +75,11 @@ def test_cmake_compilation(sbml_example_presimulation_module): try: subprocess.run( - cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + cmd, + shell=True, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, ) except subprocess.CalledProcessError as e: print(e.stdout.decode()) @@ -111,7 +119,9 @@ def test_monkeypatch(): assert (t**n).diff(t).subs(vals) is sp.nan # check that we can monkeypatch it out - with _monkeypatched(sp.Pow, "_eval_derivative", _custom_pow_eval_derivative): + with _monkeypatched( + sp.Pow, "_eval_derivative", _custom_pow_eval_derivative + ): assert (t**n).diff(t).subs(vals) is not sp.nan # check that the monkeypatch is transient diff --git a/python/tests/test_observable_events.py b/python/tests/test_observable_events.py index 83a7b94c7e..2887308ff6 100644 --- a/python/tests/test_observable_events.py +++ b/python/tests/test_observable_events.py @@ -62,7 +62,9 @@ def model_neuron_def(): } } - event_observables = {"z1": {"name": "z1", "event": "event_1", "formula": "time"}} + event_observables = { + "z1": {"name": "z1", "event": "event_1", "formula": "time"} + } return ( initial_assignments, parameters, @@ -210,7 +212,9 @@ def run_test_cases(model): edata = None if 
"data" in expected_results[model.getName()][case].keys(): edata = amici.readSimulationExpData( - str(expected_results_file), f"/{model_name}/{case}/data", model.get() + str(expected_results_file), + f"/{model_name}/{case}/data", + model.get(), ) rdata = amici.runAmiciSimulation(model, solver, edata) diff --git a/python/tests/test_pandas.py b/python/tests/test_pandas.py index e904fce7cc..21c58bcaff 100644 --- a/python/tests/test_pandas.py +++ b/python/tests/test_pandas.py @@ -49,8 +49,12 @@ def test_pandas_import_export(sbml_example_presimulation_module, case): assert case[fp] == getattr(edata_reconstructed[0], fp) else: - assert model.getFixedParameters() == getattr(edata_reconstructed[0], fp) + assert model.getFixedParameters() == getattr( + edata_reconstructed[0], fp + ) - assert model.getFixedParameters() == getattr(edata_reconstructed[0], fp) + assert model.getFixedParameters() == getattr( + edata_reconstructed[0], fp + ) assert getattr(edata[0], fp) == case[fp] diff --git a/python/tests/test_parameter_mapping.py b/python/tests/test_parameter_mapping.py index 32ddf93103..ae66e23f53 100644 --- a/python/tests/test_parameter_mapping.py +++ b/python/tests/test_parameter_mapping.py @@ -2,7 +2,10 @@ import os import pytest -from amici.parameter_mapping import ParameterMapping, ParameterMappingForCondition +from amici.parameter_mapping import ( + ParameterMapping, + ParameterMappingForCondition, +) from amici.testing import skip_on_valgrind @@ -25,16 +28,25 @@ def test_parameter_mapping_for_condition_default_args(): map_preeq_fix = {"sim_par2": "opt_par1"} map_sim_fix = {"sim_par2": "opt_par2"} par_map_for_condition = ParameterMappingForCondition( - map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix, map_sim_fix=map_sim_fix + map_sim_var=map_sim_var, + map_preeq_fix=map_preeq_fix, + map_sim_fix=map_sim_fix, ) expected_scale_map_sim_var = {"sim_par0": "lin", "sim_par1": "lin"} expected_scale_map_preeq_fix = {"sim_par2": "lin"} expected_scale_map_sim_fix = {"sim_par2": "lin"} - assert par_map_for_condition.scale_map_sim_var == expected_scale_map_sim_var - assert par_map_for_condition.scale_map_preeq_fix == expected_scale_map_preeq_fix - assert par_map_for_condition.scale_map_sim_fix == expected_scale_map_sim_fix + assert ( + par_map_for_condition.scale_map_sim_var == expected_scale_map_sim_var + ) + assert ( + par_map_for_condition.scale_map_preeq_fix + == expected_scale_map_preeq_fix + ) + assert ( + par_map_for_condition.scale_map_sim_fix == expected_scale_map_sim_fix + ) @skip_on_valgrind @@ -48,7 +60,9 @@ def test_parameter_mapping(): map_preeq_fix = {"sim_par2": "opt_par1"} map_sim_fix = {"sim_par2": "opt_par2"} par_map_for_condition = ParameterMappingForCondition( - map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix, map_sim_fix=map_sim_fix + map_sim_var=map_sim_var, + map_preeq_fix=map_preeq_fix, + map_sim_fix=map_sim_fix, ) parameter_mapping.append(par_map_for_condition) diff --git a/python/tests/test_petab_import.py b/python/tests/test_petab_import.py index f6db30f18a..fc978b76ea 100644 --- a/python/tests/test_petab_import.py +++ b/python/tests/test_petab_import.py @@ -60,7 +60,9 @@ def test_get_fixed_parameters(simple_sbml_model): ) ) parameter_df = petab.get_parameter_df( - pd.DataFrame({petab.PARAMETER_ID: ["p3", "p4"], petab.ESTIMATE: [0, 1]}) + pd.DataFrame( + {petab.PARAMETER_ID: ["p3", "p4"], petab.ESTIMATE: [0, 1]} + ) ) print(condition_df) print(parameter_df) @@ -122,7 +124,9 @@ def test_default_output_parameters(simple_sbml_model): ) assert ( 1.0 - == 
sbml_importer.sbml.getParameter("observableParameter1_obs1").getValue() + == sbml_importer.sbml.getParameter( + "observableParameter1_obs1" + ).getValue() ) with pytest.raises(ValueError): diff --git a/python/tests/test_petab_objective.py b/python/tests/test_petab_objective.py index 1b2436ceab..e31e693d11 100755 --- a/python/tests/test_petab_objective.py +++ b/python/tests/test_petab_objective.py @@ -50,7 +50,9 @@ def test_simulate_petab_sensitivities(lotka_volterra): for scaled_gradients in [True, False]: _problem_parameters = problem_parameters.copy() if scaled_parameters: - _problem_parameters = petab_problem.scale_parameters(problem_parameters) + _problem_parameters = petab_problem.scale_parameters( + problem_parameters + ) results[(scaled_parameters, scaled_gradients)] = pd.Series( amici.petab_objective.simulate_petab( petab_problem=petab_problem, diff --git a/python/tests/test_petab_simulate.py b/python/tests/test_petab_simulate.py index 385f98e05e..febea5fd50 100644 --- a/python/tests/test_petab_simulate.py +++ b/python/tests/test_petab_simulate.py @@ -53,7 +53,9 @@ def test_subset_call(petab_problem): simulator0 = PetabSimulator(petab_problem) assert not (Path(model_output_dir) / model_name).is_dir() - simulator0.simulate(model_name=model_name, model_output_dir=model_output_dir) + simulator0.simulate( + model_name=model_name, model_output_dir=model_output_dir + ) # Model name is handled correctly assert simulator0.amici_model.getName() == model_name # Check model output directory is created, by diff --git a/python/tests/test_preequilibration.py b/python/tests/test_preequilibration.py index d797c4bf3b..a42bc6354d 100644 --- a/python/tests/test_preequilibration.py +++ b/python/tests/test_preequilibration.py @@ -70,7 +70,16 @@ def preeq_fixture(pysb_example_presimulation_module): [1, 1, 1], ] - return (model, solver, edata, edata_preeq, edata_presim, edata_sim, pscales, plists) + return ( + model, + solver, + edata, + edata_preeq, + edata_presim, + edata_sim, + pscales, + plists, + ) def test_manual_preequilibration(preeq_fixture): @@ -133,7 +142,9 @@ def test_manual_preequilibration(preeq_fixture): rdata_sim[variable], atol=1e-6, rtol=1e-6, - err_msg=str(dict(pscale=pscale, plist=plist, variable=variable)), + err_msg=str( + dict(pscale=pscale, plist=plist, variable=variable) + ), ) @@ -349,7 +360,10 @@ def test_equilibration_methods_with_adjoints(preeq_fixture): amici.SteadyStateSensitivityMode.integrationOnly, amici.SteadyStateSensitivityMode.integrateIfNewtonFails, ] - sensi_meths = [amici.SensitivityMethod.forward, amici.SensitivityMethod.adjoint] + sensi_meths = [ + amici.SensitivityMethod.forward, + amici.SensitivityMethod.adjoint, + ] settings = itertools.product(equil_meths, sensi_meths) for setting in settings: @@ -374,7 +388,9 @@ def test_equilibration_methods_with_adjoints(preeq_fixture): atol=1e-6, rtol=1e-6, err_msg=str( - dict(variable=variable, setting1=setting1, setting2=setting2) + dict( + variable=variable, setting1=setting1, setting2=setting2 + ) ), ) @@ -523,11 +539,15 @@ def test_steadystate_computation_mode(preeq_fixture): assert rdatas[mode]["status"] == amici.AMICI_SUCCESS assert np.all( - rdatas[amici.SteadyStateComputationMode.integrationOnly]["preeq_status"][0] + rdatas[amici.SteadyStateComputationMode.integrationOnly][ + "preeq_status" + ][0] == [0, 1, 0] ) assert ( - rdatas[amici.SteadyStateComputationMode.integrationOnly]["preeq_numsteps"][0][0] + rdatas[amici.SteadyStateComputationMode.integrationOnly][ + "preeq_numsteps" + ][0][0] == 0 ) @@ -536,7 
+556,10 @@ def test_steadystate_computation_mode(preeq_fixture): == [1, 0, 0] ) assert ( - rdatas[amici.SteadyStateComputationMode.newtonOnly]["preeq_numsteps"][0][0] > 0 + rdatas[amici.SteadyStateComputationMode.newtonOnly]["preeq_numsteps"][ + 0 + ][0] + > 0 ) # assert correct results @@ -563,7 +586,9 @@ def test_simulation_errors(preeq_fixture): ) = preeq_fixture solver.setSensitivityOrder(amici.SensitivityOrder.first) - solver.setSensitivityMethodPreequilibration(amici.SensitivityMethod.forward) + solver.setSensitivityMethodPreequilibration( + amici.SensitivityMethod.forward + ) model.setSteadyStateSensitivityMode( amici.SteadyStateSensitivityMode.integrationOnly ) @@ -594,10 +619,16 @@ def test_simulation_errors(preeq_fixture): rdata = amici.runAmiciSimulation(model, solver, e) assert rdata["status"] != amici.AMICI_SUCCESS assert rdata._swigptr.messages[0].severity == amici.LogSeverity_debug - assert rdata._swigptr.messages[0].identifier == "CVODES:CVode:RHSFUNC_FAIL" + assert ( + rdata._swigptr.messages[0].identifier + == "CVODES:CVode:RHSFUNC_FAIL" + ) assert rdata._swigptr.messages[1].severity == amici.LogSeverity_debug assert rdata._swigptr.messages[1].identifier == "EQUILIBRATION_FAILURE" - assert "exceedingly long simulation time" in rdata._swigptr.messages[1].message + assert ( + "exceedingly long simulation time" + in rdata._swigptr.messages[1].message + ) assert rdata._swigptr.messages[2].severity == amici.LogSeverity_error assert rdata._swigptr.messages[2].identifier == "OTHER" assert rdata._swigptr.messages[3].severity == amici.LogSeverity_debug diff --git a/python/tests/test_pregenerated_models.py b/python/tests/test_pregenerated_models.py index bd4bb7e53b..5a110cdfc2 100755 --- a/python/tests/test_pregenerated_models.py +++ b/python/tests/test_pregenerated_models.py @@ -97,12 +97,17 @@ def test_pregenerated_model(sub_test, case): verify_simulation_opts["atol"] = 1e-5 verify_simulation_opts["rtol"] = 1e-2 - if model_name.startswith("model_robertson") and case == "sensiforwardSPBCG": + if ( + model_name.startswith("model_robertson") + and case == "sensiforwardSPBCG" + ): verify_simulation_opts["atol"] = 1e-3 verify_simulation_opts["rtol"] = 1e-3 verify_simulation_results( - rdata, expected_results[sub_test][case]["results"], **verify_simulation_opts + rdata, + expected_results[sub_test][case]["results"], + **verify_simulation_opts, ) if model_name == "model_steadystate" and case == "sensiforwarderrorint": @@ -113,7 +118,9 @@ def test_pregenerated_model(sub_test, case): if ( edata and model_name != "model_neuron_o2" - and not (model_name == "model_robertson" and case == "sensiforwardSPBCG") + and not ( + model_name == "model_robertson" and case == "sensiforwardSPBCG" + ) ): if isinstance(edata, amici.amici.ExpData): edatas = [edata, edata] @@ -222,14 +229,18 @@ def verify_simulation_results( subfields = expected_results["diagnosis"].keys() else: - attrs = [field for field in fields if field in expected_results.attrs.keys()] + attrs = [ + field for field in fields if field in expected_results.attrs.keys() + ] if "diagnosis" in expected_results.keys(): subfields = [ field for field in fields if field in expected_results["diagnosis"].keys() ] - fields = [field for field in fields if field in expected_results.keys()] + fields = [ + field for field in fields if field in expected_results.keys() + ] if expected_results.attrs["status"][0] != 0: assert rdata["status"] == expected_results.attrs["status"][0] @@ -254,12 +265,22 @@ def verify_simulation_results( continue if field == 
"s2llh": _check_results( - rdata, field, expected_results[field][()], atol=1e-4, rtol=1e-3 + rdata, + field, + expected_results[field][()], + atol=1e-4, + rtol=1e-3, ) else: _check_results( - rdata, field, expected_results[field][()], atol=atol, rtol=rtol + rdata, + field, + expected_results[field][()], + atol=atol, + rtol=rtol, ) for attr in attrs: - _check_results(rdata, attr, expected_results.attrs[attr], atol=atol, rtol=rtol) + _check_results( + rdata, attr, expected_results.attrs[attr], atol=atol, rtol=rtol + ) diff --git a/python/tests/test_pysb.py b/python/tests/test_pysb.py index 5673667e3f..52ca3a320f 100644 --- a/python/tests/test_pysb.py +++ b/python/tests/test_pysb.py @@ -141,7 +141,9 @@ def test_compare_to_pysb_simulation(example): with amici.add_path(os.path.dirname(pysb.examples.__file__)): with amici.add_path( - os.path.join(os.path.dirname(__file__), "..", "tests", "pysb_test_models") + os.path.join( + os.path.dirname(__file__), "..", "tests", "pysb_test_models" + ) ): # load example pysb.SelfExporter.cleanup() # reset pysb @@ -185,7 +187,9 @@ def test_compare_to_pysb_simulation(example): observables=list(pysb_model.observables.keys()), ) - amici_model_module = amici.import_model_module(pysb_model.name, outdir) + amici_model_module = amici.import_model_module( + pysb_model.name, outdir + ) model_pysb = amici_model_module.getModel() model_pysb.setTimepoints(tspan) @@ -196,7 +200,9 @@ def test_compare_to_pysb_simulation(example): rdata = amici.runAmiciSimulation(model_pysb, solver) # check agreement of species simulations - assert np.isclose(rdata["x"], pysb_simres.species, 1e-4, 1e-4).all() + assert np.isclose( + rdata["x"], pysb_simres.species, 1e-4, 1e-4 + ).all() if example not in [ "fricker_2010_apoptosis", @@ -325,7 +331,8 @@ def test_heavyside_and_special_symbols(): "deg", a() >> None, pysb.Expression( - "rate", sp.Piecewise((1, pysb.Observable("a", a()) < 1), (0.0, True)) + "rate", + sp.Piecewise((1, pysb.Observable("a", a()) < 1), (0.0, True)), ), ) @@ -374,4 +381,6 @@ def test_energy(): solver.setRelativeTolerance(1e-14) solver.setAbsoluteTolerance(1e-14) - check_derivatives(amici_model, solver, epsilon=1e-4, rtol=1e-2, atol=1e-2) + check_derivatives( + amici_model, solver, epsilon=1e-4, rtol=1e-2, atol=1e-2 + ) diff --git a/python/tests/test_rdata.py b/python/tests/test_rdata.py index cbfc6dc7a9..29ea401932 100644 --- a/python/tests/test_rdata.py +++ b/python/tests/test_rdata.py @@ -23,7 +23,9 @@ def test_rdata_by_id(rdata_by_id_fixture): assert_array_equal(rdata.by_id(model.getStateIds()[1]), rdata.x[:, 1]) assert_array_equal(rdata.by_id(model.getStateIds()[1], "x"), rdata.x[:, 1]) - assert_array_equal(rdata.by_id(model.getStateIds()[1], "x", model), rdata.x[:, 1]) + assert_array_equal( + rdata.by_id(model.getStateIds()[1], "x", model), rdata.x[:, 1] + ) assert_array_equal( rdata.by_id(model.getObservableIds()[0], "y", model), rdata.y[:, 0] diff --git a/python/tests/test_sbml_import.py b/python/tests/test_sbml_import.py index 41ccdd925c..7c4a67c0a2 100644 --- a/python/tests/test_sbml_import.py +++ b/python/tests/test_sbml_import.py @@ -73,7 +73,10 @@ def test_sbml2amici_nested_observables_fail(simple_sbml_model): sbml_importer.sbml2amici( model_name=model_name, output_dir=tmpdir, - observables={"outer": {"formula": "inner"}, "inner": {"formula": "S1"}}, + observables={ + "outer": {"formula": "inner"}, + "inner": {"formula": "S1"}, + }, compute_conservation_laws=False, generate_sensitivity_code=False, compile=False, @@ -135,11 +138,15 @@ def 
observable_dependent_error_model(simple_sbml_model): "observable_s1_scaled": "0.02 * observable_s1_scaled", }, ) - yield amici.import_model_module(module_name=model_name, module_path=tmpdir) + yield amici.import_model_module( + module_name=model_name, module_path=tmpdir + ) @skip_on_valgrind -def test_sbml2amici_observable_dependent_error(observable_dependent_error_model): +def test_sbml2amici_observable_dependent_error( + observable_dependent_error_model, +): """Check gradients for model with observable-dependent error""" model_module = observable_dependent_error_model model = model_module.getModel() @@ -149,9 +156,14 @@ def test_sbml2amici_observable_dependent_error(observable_dependent_error_model) # generate artificial data rdata = amici.runAmiciSimulation(model, solver) assert_allclose( - rdata.sigmay[:, 0], 0.1 + 0.05 * rdata.y[:, 0], rtol=1.0e-5, atol=1.0e-8 + rdata.sigmay[:, 0], + 0.1 + 0.05 * rdata.y[:, 0], + rtol=1.0e-5, + atol=1.0e-8, + ) + assert_allclose( + rdata.sigmay[:, 1], 0.02 * rdata.y[:, 1], rtol=1.0e-5, atol=1.0e-8 ) - assert_allclose(rdata.sigmay[:, 1], 0.02 * rdata.y[:, 1], rtol=1.0e-5, atol=1.0e-8) edata = amici.ExpData(rdata, 1.0, 0.0) edata.setObservedDataStdDev(np.nan) @@ -196,7 +208,9 @@ def model_steadystate_module(): observables = amici.assignmentRules2observables( sbml_importer.sbml, - filter_function=lambda variable: variable.getId().startswith("observable_") + filter_function=lambda variable: variable.getId().startswith( + "observable_" + ) and not variable.getId().endswith("_sigma"), ) @@ -210,7 +224,9 @@ def model_steadystate_module(): sigmas={"observable_x1withsigma": "observable_x1withsigma_sigma"}, ) - yield amici.import_model_module(module_name=module_name, module_path=outdir) + yield amici.import_model_module( + module_name=module_name, module_path=outdir + ) @pytest.fixture(scope="session") @@ -223,7 +239,9 @@ def model_units_module(): with TemporaryDirectory() as outdir: sbml_importer.sbml2amici(model_name=module_name, output_dir=outdir) - yield amici.import_model_module(module_name=module_name, module_path=outdir) + yield amici.import_model_module( + module_name=module_name, module_path=outdir + ) def test_presimulation(sbml_example_presimulation_module): @@ -323,7 +341,9 @@ def test_steadystate_simulation(model_steadystate_module): solver.setRelativeTolerance(1e-12) solver.setAbsoluteTolerance(1e-12) - check_derivatives(model, solver, edata[0], atol=1e-3, rtol=1e-3, epsilon=1e-4) + check_derivatives( + model, solver, edata[0], atol=1e-3, rtol=1e-3, epsilon=1e-4 + ) # Run some additional tests which need a working Model, # but don't need precomputed expectations. 
@@ -406,7 +426,9 @@ def model_test_likelihoods(): noise_distributions=noise_distributions, ) - yield amici.import_model_module(module_name=module_name, module_path=outdir) + yield amici.import_model_module( + module_name=module_name, module_path=outdir + ) @skip_on_valgrind @@ -512,7 +534,9 @@ def test_units(model_units_module): @skip_on_valgrind -@pytest.mark.skipif(os.name == "nt", reason="Avoid `CERTIFICATE_VERIFY_FAILED` error") +@pytest.mark.skipif( + os.name == "nt", reason="Avoid `CERTIFICATE_VERIFY_FAILED` error" +) def test_sympy_exp_monkeypatch(): """ This model contains a removeable discontinuity at t=0 that requires @@ -557,7 +581,9 @@ def test_sympy_exp_monkeypatch(): # print sensitivity-related results assert rdata["status"] == amici.AMICI_SUCCESS - check_derivatives(model, solver, None, atol=1e-2, rtol=1e-2, epsilon=1e-3) + check_derivatives( + model, solver, None, atol=1e-2, rtol=1e-2, epsilon=1e-3 + ) def normal_nllh(m, y, sigma): @@ -594,7 +620,8 @@ def log_laplace_nllh(m, y, sigma): def log10_laplace_nllh(m, y, sigma): return sum( - np.log(2 * sigma * m * np.log(10)) + np.abs(np.log10(y) - np.log10(m)) / sigma + np.log(2 * sigma * m * np.log(10)) + + np.abs(np.log10(y) - np.log10(m)) / sigma ) diff --git a/python/tests/test_sbml_import_special_functions.py b/python/tests/test_sbml_import_special_functions.py index 1aa806156d..9d8f447511 100644 --- a/python/tests/test_sbml_import_special_functions.py +++ b/python/tests/test_sbml_import_special_functions.py @@ -51,7 +51,9 @@ def model_special_likelihoods(): noise_distributions=noise_distributions, ) - yield amici.import_model_module(module_name=module_name, module_path=outdir) + yield amici.import_model_module( + module_name=module_name, module_path=outdir + ) @skip_on_valgrind @@ -111,7 +113,12 @@ def test_special_likelihoods(model_special_likelihoods): solver.setSensitivityMethod(sensi_method) solver.setSensitivityOrder(amici.SensitivityOrder.first) check_derivatives( - model, solver, edata, atol=1e-4, rtol=1e-3, check_least_squares=False + model, + solver, + edata, + atol=1e-4, + rtol=1e-3, + check_least_squares=False, ) # Test for m > y, i.e. 
in region with 0 density @@ -205,9 +212,13 @@ def test_rateof(): assert_approx_equal(rdata["xdot"][i_S1], rdata["xdot"][i_p2]) assert_array_almost_equal_nulp(rdata.by_id("S3"), t, 10) - assert_array_almost_equal_nulp(rdata.by_id("S2"), 2 * rdata.by_id("S3")) + assert_array_almost_equal_nulp( + rdata.by_id("S2"), 2 * rdata.by_id("S3") + ) assert_array_almost_equal_nulp( rdata.by_id("S4")[1:], 0.5 * np.diff(rdata.by_id("S3")), 10 ) assert_array_almost_equal_nulp(rdata.by_id("p3"), 0) - assert_array_almost_equal_nulp(rdata.by_id("p2"), 1 + rdata.by_id("S1")) + assert_array_almost_equal_nulp( + rdata.by_id("p2"), 1 + rdata.by_id("S1") + ) diff --git a/python/tests/test_splines.py b/python/tests/test_splines.py index 2385631ab5..a7fe01e84a 100644 --- a/python/tests/test_splines.py +++ b/python/tests/test_splines.py @@ -61,7 +61,9 @@ def test_multiple_splines(**kwargs): tols5 = (tols5, tols5, tols5) tols = [] - for t0, t1, t2, t3, t4, t5 in zip(tols0, tols1, tols2, tols3, tols4, tols5): + for t0, t1, t2, t3, t4, t5 in zip( + tols0, tols1, tols2, tols3, tols4, tols5 + ): keys = set().union( t0.keys(), t1.keys(), t2.keys(), t3.keys(), t4.keys(), t5.keys() ) @@ -98,7 +100,8 @@ def test_multiple_splines(**kwargs): # groundtruth = test_multiple_splines(return_groundtruth=True) # They should be recomputed only if the splines used in the test change precomputed_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "test_splines_precomputed.npz" + os.path.dirname(os.path.abspath(__file__)), + "test_splines_precomputed.npz", ) kwargs["groundtruth"] = dict(np.load(precomputed_path)) diff --git a/python/tests/test_splines_python.py b/python/tests/test_splines_python.py index 905ae8b83d..4c4de5ccfc 100644 --- a/python/tests/test_splines_python.py +++ b/python/tests/test_splines_python.py @@ -217,7 +217,9 @@ def check_gradient(spline, t, params, params_values, expected, rel_tol=1e-9): value = spline.evaluate(t) subs = {pname: pvalue for (pname, pvalue) in zip(params, params_values)} for p, exp in zip(params, expected): - assert math.isclose(float(value.diff(p).subs(subs)), exp, rel_tol=rel_tol) + assert math.isclose( + float(value.diff(p).subs(subs)), exp, rel_tol=rel_tol + ) @skip_on_valgrind @@ -232,13 +234,25 @@ def test_SplineUniformSensitivity(): ) check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0]) check_gradient( - spline, 0.25, params, params_values, [0.539062, 0.179688, 4.45312], rel_tol=1e-5 + spline, + 0.25, + params, + params_values, + [0.539062, 0.179688, 4.45312], + rel_tol=1e-5, ) check_gradient(spline, 1.0 / 3, params, params_values, [0.0, 0.0, 5.0]) - check_gradient(spline, 0.50, params, params_values, [0.1875, -0.125, 2.625]) + check_gradient( + spline, 0.50, params, params_values, [0.1875, -0.125, 2.625] + ) check_gradient(spline, 2.0 / 3, params, params_values, [0.0, 0.0, 0.0]) check_gradient( - spline, 0.75, params, params_values, [-1.07812, 0.179688, 0.1875], rel_tol=1e-5 + spline, + 0.75, + params, + params_values, + [-1.07812, 0.179688, 0.1875], + rel_tol=1e-5, ) check_gradient(spline, 1.00, params, params_values, [-6.0, 1.0, 3.0]) @@ -255,12 +269,19 @@ def test_SplineNonUniformSensitivity(): ) check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0]) check_gradient( - spline, 0.05, params, params_values, [1.3125, 0.4375, 2.89062], rel_tol=1e-5 + spline, + 0.05, + params, + params_values, + [1.3125, 0.4375, 2.89062], + rel_tol=1e-5, ) check_gradient(spline, 0.10, params, params_values, [0.0, 0.0, 5.0]) check_gradient(spline, 0.30, params, 
params_values, [-0.45, -0.3, 3.6]) check_gradient(spline, 0.50, params, params_values, [0.0, 0.0, 0.0]) - check_gradient(spline, 0.75, params, params_values, [-2.625, 0.4375, 0.921875]) + check_gradient( + spline, 0.75, params, params_values, [-2.625, 0.4375, 0.921875] + ) check_gradient(spline, 1.00, params, params_values, [-6.0, 1.0, 3.0]) @@ -282,15 +303,30 @@ def test_SplineExplicitSensitivity(): ) check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0]) check_gradient( - spline, 0.25, params, params_values, [0.46875, 0.109375, 4.37109], rel_tol=1e-6 + spline, + 0.25, + params, + params_values, + [0.46875, 0.109375, 4.37109], + rel_tol=1e-6, ) check_gradient(spline, 1.0 / 3, params, params_values, [0.0, 0.0, 5.0]) check_gradient( - spline, 0.50, params, params_values, [-0.166667, 0.0641793, 2.625], rel_tol=1e-5 + spline, + 0.50, + params, + params_values, + [-0.166667, 0.0641793, 2.625], + rel_tol=1e-5, ) check_gradient(spline, 2.0 / 3, params, params_values, [0.0, 0.0, 0.0]) check_gradient( - spline, 0.75, params, params_values, [-0.75, 0.130923, 0.46875], rel_tol=1e-5 + spline, + 0.75, + params, + params_values, + [-0.75, 0.130923, 0.46875], + rel_tol=1e-5, ) check_gradient(spline, 1.00, params, params_values, [-6.0, 1.0, 3.0]) @@ -308,7 +344,12 @@ def test_SplineLogarithmicSensitivity(): ) check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0]) check_gradient( - spline, 0.25, params, params_values, [0.585881, 0.195294, 4.38532], rel_tol=1e-5 + spline, + 0.25, + params, + params_values, + [0.585881, 0.195294, 4.38532], + rel_tol=1e-5, ) check_gradient(spline, 1.0 / 3, params, params_values, [0.0, 0.0, 5.0]) check_gradient( diff --git a/python/tests/test_splines_short.py b/python/tests/test_splines_short.py index 37df5f5db9..59e54a3279 100644 --- a/python/tests/test_splines_short.py +++ b/python/tests/test_splines_short.py @@ -99,7 +99,9 @@ def test_splines_plist(): # Real spline #3 xx = UniformGrid(0, 5, number_of_nodes=6) p1, p2, p3, p4, p5 = sp.symbols("p1 p2 p3 p4 p5") - yy = np.asarray([p1 + p2, p2 * p3, p4, sp.cos(p1 + p3), p4 * sp.log(p1), p3]) + yy = np.asarray( + [p1 + p2, p2 * p3, p4, sp.cos(p1 + p3), p4 * sp.log(p1), p3] + ) dd = np.asarray([-0.75, -0.875, p5, 0.125, 1.15057181, 0.0]) params = {p1: 1.0, p2: 0.5, p3: 1.5, p4: -0.25, p5: -0.5} # print([y.subs(params).evalf() for y in yy]) diff --git a/python/tests/test_swig_interface.py b/python/tests/test_swig_interface.py index 09cc4c78af..b1afa0b76b 100644 --- a/python/tests/test_swig_interface.py +++ b/python/tests/test_swig_interface.py @@ -49,7 +49,9 @@ def test_copy_constructors(pysb_example_presimulation_module): obj_clone = obj.clone() - assert get_val(obj, attr) == get_val(obj_clone, attr), f"{obj} - {attr}" + assert get_val(obj, attr) == get_val( + obj_clone, attr + ), f"{obj} - {attr}" # `None` values are skipped in `test_model_instance_settings`. @@ -165,16 +167,22 @@ def test_model_instance_settings(pysb_example_presimulation_module): # The new model has the default settings. 
model_default_settings = amici.get_model_settings(model) for name in model_instance_settings: - if (name == "InitialStates" and not model.hasCustomInitialStates()) or ( + if ( + name == "InitialStates" and not model.hasCustomInitialStates() + ) or ( name - == ("getInitialStateSensitivities", "setUnscaledInitialStateSensitivities") + == ( + "getInitialStateSensitivities", + "setUnscaledInitialStateSensitivities", + ) and not model.hasCustomInitialStateSensitivities() ): # Here the expected value differs from what the getter would return assert model_default_settings[name] == [] else: assert ( - model_default_settings[name] == model_instance_settings[name][i_default] + model_default_settings[name] + == model_instance_settings[name][i_default] ), name # The grouped setter method works. @@ -221,7 +229,9 @@ def test_interdependent_settings(pysb_example_presimulation_module): # Some values need to be transformed to be tested in Python # (e.g. SWIG objects). Default transformer is no transformation # (the identity function). - getter_transformers = {setting: (lambda x: x) for setting in original_settings} + getter_transformers = { + setting: (lambda x: x) for setting in original_settings + } getter_transformers.update( { # Convert from SWIG object. @@ -311,7 +321,9 @@ def test_unhandled_settings(pysb_example_presimulation_module): name for names in model_instance_settings for name in ( - names if isinstance(names, tuple) else (f"get{names}", f"set{names}") + names + if isinstance(names, tuple) + else (f"get{names}", f"set{names}") ) ] @@ -375,7 +387,9 @@ def test_model_instance_settings_custom_x0(pysb_example_presimulation_module): assert not model.hasCustomInitialStateSensitivities() settings = amici.get_model_settings(model) model.setInitialStates(model.getInitialStates()) - model.setUnscaledInitialStateSensitivities(model.getInitialStateSensitivities()) + model.setUnscaledInitialStateSensitivities( + model.getInitialStateSensitivities() + ) amici.set_model_settings(model, settings) assert not model.hasCustomInitialStates() assert not model.hasCustomInitialStateSensitivities() diff --git a/python/tests/util.py b/python/tests/util.py index 14f514c997..dde10eb454 100644 --- a/python/tests/util.py +++ b/python/tests/util.py @@ -31,7 +31,9 @@ def create_amici_model(sbml_model, model_name, **kwargs) -> AmiciModel: else tempfile.mkdtemp() ) - sbml_importer.sbml2amici(model_name=model_name, output_dir=output_dir, **kwargs) + sbml_importer.sbml2amici( + model_name=model_name, output_dir=output_dir, **kwargs + ) model_module = import_model_module(model_name, output_dir) return model_module.getModel() @@ -111,7 +113,9 @@ def create_event_assignment(target, assignment): create_event_assignment(event_target, event_assignment) else: - create_event_assignment(event_def["target"], event_def["assignment"]) + create_event_assignment( + event_def["target"], event_def["assignment"] + ) if to_file: libsbml.writeSBMLToFile(document, to_file) @@ -133,7 +137,9 @@ def check_trajectories_without_sensitivities( solver.setAbsoluteTolerance(1e-15) solver.setRelativeTolerance(1e-12) rdata = runAmiciSimulation(amici_model, solver=solver) - _check_close(rdata["x"], result_expected_x, field="x", rtol=5e-9, atol=1e-13) + _check_close( + rdata["x"], result_expected_x, field="x", rtol=5e-9, atol=1e-13 + ) def check_trajectories_with_forward_sensitivities( @@ -153,5 +159,9 @@ def check_trajectories_with_forward_sensitivities( solver.setAbsoluteToleranceFSA(1e-15) solver.setRelativeToleranceFSA(1e-13) rdata = 
runAmiciSimulation(amici_model, solver=solver) - _check_close(rdata["x"], result_expected_x, field="x", rtol=1e-10, atol=1e-12) - _check_close(rdata["sx"], result_expected_sx, field="sx", rtol=1e-7, atol=1e-9) + _check_close( + rdata["x"], result_expected_x, field="x", rtol=1e-10, atol=1e-12 + ) + _check_close( + rdata["sx"], result_expected_sx, field="sx", rtol=1e-7, atol=1e-9 + ) diff --git a/tests/benchmark-models/evaluate_benchmark.py b/tests/benchmark-models/evaluate_benchmark.py index bcf1f63bb8..0c6e2e4122 100644 --- a/tests/benchmark-models/evaluate_benchmark.py +++ b/tests/benchmark-models/evaluate_benchmark.py @@ -29,12 +29,15 @@ ratios = ( pd.concat( - [df[sensi] / df["t_sim"].values for sensi in ["t_fwd", "t_adj"]] + [df.np], + [df[sensi] / df["t_sim"].values for sensi in ["t_fwd", "t_adj"]] + + [df.np], axis=1, ) .reset_index() .melt(id_vars=["index", "np"]) - .rename(columns={"index": "model", "variable": "sensitivity", "value": "ratio"}) + .rename( + columns={"index": "model", "variable": "sensitivity", "value": "ratio"} + ) ) ratios["sensitivity"] = ratios["sensitivity"].replace( {"t_fwd": "forward", "t_adj": "adjoint"} @@ -48,7 +51,14 @@ for ir, row in ratios.iterrows(): if row.sensitivity == "adjoint": continue - g.text(ir, row["np"], int(row["np"]), color="black", ha="center", weight="bold") + g.text( + ir, + row["np"], + int(row["np"]), + color="black", + ha="center", + weight="bold", + ) plt.xticks(rotation=30, horizontalalignment="right") plt.tight_layout() diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py index 91bd117a81..0b3c6d80e0 100755 --- a/tests/benchmark-models/test_petab_benchmark.py +++ b/tests/benchmark-models/test_petab_benchmark.py @@ -19,7 +19,9 @@ RTOL: float = 1e-2 benchmark_path = ( - Path(__file__).parent.parent.parent / "Benchmark-Models-PEtab" / "Benchmark-Models" + Path(__file__).parent.parent.parent + / "Benchmark-Models-PEtab" + / "Benchmark-Models" ) # reuse compiled models from test_benchmark_collection.sh benchmark_outdir = Path(__file__).parent.parent.parent / "test_bmc" @@ -67,11 +69,15 @@ def test_benchmark_gradient(model, scale): # only fail on linear scale pytest.skip() - petab_problem = petab.Problem.from_yaml(benchmark_path / model / (model + ".yaml")) + petab_problem = petab.Problem.from_yaml( + benchmark_path / model / (model + ".yaml") + ) petab.flatten_timepoint_specific_output_overrides(petab_problem) # Only compute gradient for estimated parameters. - parameter_df_free = petab_problem.parameter_df.loc[petab_problem.x_free_ids] + parameter_df_free = petab_problem.parameter_df.loc[ + petab_problem.x_free_ids + ] parameter_ids = list(parameter_df_free.index) # Setup AMICI objects. 
@@ -160,7 +166,11 @@ def test_benchmark_gradient(model, scale): df = pd.DataFrame( [ { - ("fd", r.metadata["size_absolute"], str(r.method_id)): r.value + ( + "fd", + r.metadata["size_absolute"], + str(r.method_id), + ): r.value for c in d.computers for r in c.results } diff --git a/tests/benchmark-models/test_petab_model.py b/tests/benchmark-models/test_petab_model.py index 5742d668c6..cf85147535 100755 --- a/tests/benchmark-models/test_petab_model.py +++ b/tests/benchmark-models/test_petab_model.py @@ -16,7 +16,12 @@ import petab import yaml from amici.logging import get_logger -from amici.petab_objective import LLH, RDATAS, rdatas_to_measurement_df, simulate_petab +from amici.petab_objective import ( + LLH, + RDATAS, + rdatas_to_measurement_df, + simulate_petab, +) from petab.visualize import plot_problem logger = get_logger(f"amici.{__name__}", logging.WARNING) @@ -86,7 +91,8 @@ def parse_cli_args(): "-o", "--simulation-file", dest="simulation_file", - help="File to write simulation result to, in PEtab" "measurement table format.", + help="File to write simulation result to, in PEtab " + "measurement table format.", ) return parser.parse_args() @@ -162,10 +168,14 @@ def main(): times["np"] = sum(problem.parameter_df[petab.ESTIMATE]) - pd.Series(times).to_csv(f"./tests/benchmark-models/{args.model_name}_benchmark.csv") + pd.Series(times).to_csv( + f"./tests/benchmark-models/{args.model_name}_benchmark.csv" + ) for rdata in rdatas: - assert rdata.status == amici.AMICI_SUCCESS, f"Simulation failed for {rdata.id}" + assert ( + rdata.status == amici.AMICI_SUCCESS + ), f"Simulation failed for {rdata.id}" # create simulation PEtab table sim_df = rdatas_to_measurement_df( @@ -184,7 +194,8 @@ def main(): # save figure for plot_id, ax in axs.items(): fig_path = os.path.join( - args.model_directory, f"{args.model_name}_{plot_id}_vis.png" + args.model_directory, + f"{args.model_name}_{plot_id}_vis.png", ) logger.info(f"Saving figure to {fig_path}") ax.get_figure().savefig(fig_path, dpi=150) @@ -211,7 +222,8 @@ def main(): if np.isclose(llh, ref_llh, rtol=rtol, atol=atol): logger.info( - f"Computed llh {llh:.4e} matches reference {ref_llh:.4e}." + tolstr + f"Computed llh {llh:.4e} matches reference {ref_llh:.4e}."
+ + tolstr ) else: logger.error( diff --git a/tests/conftest.py b/tests/conftest.py index 4d2f5521ff..9e90400518 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -48,7 +48,9 @@ def get_all_semantic_case_ids(): suite""" pattern = re.compile(r"\d{5}") return sorted( - str(x.name) for x in SBML_SEMANTIC_CASES_DIR.iterdir() if pattern.match(x.name) + str(x.name) + for x in SBML_SEMANTIC_CASES_DIR.iterdir() + if pattern.match(x.name) ) @@ -78,7 +80,9 @@ def pytest_generate_tests(metafunc): def pytest_sessionfinish(session, exitstatus): """Process test results""" global passed_ids - terminalreporter = session.config.pluginmanager.get_plugin("terminalreporter") + terminalreporter = session.config.pluginmanager.get_plugin( + "terminalreporter" + ) terminalreporter.ensure_newline() # parse test names to get passed case IDs (don't know any better way to # access fixture values) @@ -100,9 +104,14 @@ def write_passed_tags(passed_ids, out=sys.stdout): passed_component_tags |= cur_component_tags passed_test_tags |= cur_test_tags - out.write("\nAt least one test with the following component tags has " "passed:\n") + out.write( + "\nAt least one test with the following component tags has " + "passed:\n" + ) out.write(" " + "\n ".join(sorted(passed_component_tags))) - out.write("\n\nAt least one test with the following test tags has " "passed:\n") + out.write( + "\n\nAt least one test with the following test tags has " "passed:\n" + ) out.write(" " + "\n ".join(sorted(passed_test_tags))) @@ -132,7 +141,9 @@ def get_tags_for_test(test_id: str) -> Tuple[Set[str], Set[str]]: test_tags = set() for line in f: if line.startswith("testTags:"): - test_tags = set(re.split(r"[ ,:]", line[len("testTags:") :].strip())) + test_tags = set( + re.split(r"[ ,:]", line[len("testTags:") :].strip()) + ) test_tags.discard("") if line.startswith("componentTags:"): component_tags = set( diff --git a/tests/generateTestConfig/example.py b/tests/generateTestConfig/example.py index a0b2891344..963fdf64ce 100644 --- a/tests/generateTestConfig/example.py +++ b/tests/generateTestConfig/example.py @@ -18,7 +18,9 @@ def dict2hdf5(object, dictionary): dtype = "f8" else: dtype = " List[int]: def pytest_addoption(parser): """Add pytest CLI options""" parser.addoption("--petab-cases", help="Test cases to run") - parser.addoption("--only-pysb", help="Run only PySB tests", action="store_true") + parser.addoption( + "--only-pysb", help="Run only PySB tests", action="store_true" + ) parser.addoption( "--only-sbml", help="Run only SBML tests", @@ -44,7 +46,10 @@ def pytest_generate_tests(metafunc): """Parameterize tests""" # Run for all PEtab test suite cases - if "case" in metafunc.fixturenames and "model_type" in metafunc.fixturenames: + if ( + "case" in metafunc.fixturenames + and "model_type" in metafunc.fixturenames + ): # Get CLI option cases = metafunc.config.getoption("--petab-cases") if cases: @@ -59,7 +64,9 @@ def pytest_generate_tests(metafunc): (case, "sbml", version) for version in ("v1.0.0", "v2.0.0") for case in ( - test_numbers if test_numbers else get_cases("sbml", version=version) + test_numbers + if test_numbers + else get_cases("sbml", version=version) ) ] elif metafunc.config.getoption("--only-pysb"): diff --git a/tests/petab_test_suite/test_petab_suite.py b/tests/petab_test_suite/test_petab_suite.py index 59e2ce7723..35ee3adcfc 100755 --- a/tests/petab_test_suite/test_petab_suite.py +++ b/tests/petab_test_suite/test_petab_suite.py @@ -57,7 +57,9 @@ def _test_case(case, model_type, version): # compile amici model if 
case.startswith("0006"): petab.flatten_timepoint_specific_output_overrides(problem) - model_name = f"petab_{model_type}_test_case_{case}" f"_{version.replace('.', '_')}" + model_name = ( + f"petab_{model_type}_test_case_{case}" f"_{version.replace('.', '_')}" + ) model_output_dir = f"amici_models/{model_name}" model = import_petab_problem( petab_problem=problem, @@ -79,9 +81,13 @@ def _test_case(case, model_type, version): rdatas = ret["rdatas"] chi2 = sum(rdata["chi2"] for rdata in rdatas) llh = ret["llh"] - simulation_df = rdatas_to_measurement_df(rdatas, model, problem.measurement_df) + simulation_df = rdatas_to_measurement_df( + rdatas, model, problem.measurement_df + ) petab.check_measurement_df(simulation_df, problem.observable_df) - simulation_df = simulation_df.rename(columns={petab.MEASUREMENT: petab.SIMULATION}) + simulation_df = simulation_df.rename( + columns={petab.MEASUREMENT: petab.SIMULATION} + ) simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int) solution = petabtests.load_solution(case, model_type, version=version) gt_chi2 = solution[petabtests.CHI2] @@ -109,17 +115,26 @@ def _test_case(case, model_type, version): ) if not simulations_match: with pd.option_context( - "display.max_rows", None, "display.max_columns", None, "display.width", 200 + "display.max_rows", + None, + "display.max_columns", + None, + "display.width", + 200, ): logger.log( logging.DEBUG, - f"x_ss: {model.getStateIds()} " f"{[rdata.x_ss for rdata in rdatas]}", + f"x_ss: {model.getStateIds()} " + f"{[rdata.x_ss for rdata in rdatas]}", + ) + logger.log( + logging.ERROR, f"Expected simulations:\n{gt_simulation_dfs}" ) - logger.log(logging.ERROR, f"Expected simulations:\n{gt_simulation_dfs}") logger.log(logging.ERROR, f"Actual simulations:\n{simulation_df}") logger.log( logging.DEBUG if chi2s_match else logging.ERROR, - f"CHI2: simulated: {chi2}, expected: {gt_chi2}," f" match = {chi2s_match}", + f"CHI2: simulated: {chi2}, expected: {gt_chi2}," + f" match = {chi2s_match}", ) logger.log( logging.DEBUG if simulations_match else logging.ERROR, @@ -130,7 +145,9 @@ def _test_case(case, model_type, version): if not all([llhs_match, simulations_match]) or not chi2s_match: logger.error(f"Case {case} failed.") - raise AssertionError(f"Case {case}: Test results do not match " "expectations") + raise AssertionError( + f"Case {case}: Test results do not match " "expectations" + ) logger.info(f"Case {case} passed.") @@ -159,7 +176,9 @@ def check_derivatives( ) for edata in create_parameterized_edatas( - amici_model=model, petab_problem=problem, problem_parameters=problem_parameters + amici_model=model, + petab_problem=problem, + problem_parameters=problem_parameters, ): # check_derivatives does currently not support parameters in ExpData model.setParameters(edata.parameters) diff --git a/tests/testSBMLSuite.py b/tests/testSBMLSuite.py index f11870b60d..cfad477ac4 100755 --- a/tests/testSBMLSuite.py +++ b/tests/testSBMLSuite.py @@ -44,7 +44,9 @@ def sbml_test_dir(): sys.path = old_path -def test_sbml_testsuite_case(test_number, result_path, sbml_semantic_cases_dir): +def test_sbml_testsuite_case( + test_number, result_path, sbml_semantic_cases_dir +): test_id = format_test_id(test_number) model_dir = None @@ -67,7 +69,8 @@ def test_sbml_testsuite_case(test_number, result_path, sbml_semantic_cases_dir): results_file = current_test_path / f"{test_id}-results.csv" results = pd.read_csv(results_file, delimiter=",") results.rename( - columns={c: c.replace(" ", "") for c in results.columns}, inplace=True + 
columns={c: c.replace(" ", "") for c in results.columns}, + inplace=True, ) # setup model @@ -91,7 +94,9 @@ def test_sbml_testsuite_case(test_number, result_path, sbml_semantic_cases_dir): raise RuntimeError("Simulation failed unexpectedly") # verify - simulated = verify_results(settings, rdata, results, wrapper, model, atol, rtol) + simulated = verify_results( + settings, rdata, results, wrapper, model, atol, rtol + ) # record results write_result_file(simulated, test_id, result_path) @@ -116,7 +121,10 @@ def verify_results(settings, rdata, expected, wrapper, model, atol, rtol): # collect states simulated = pd.DataFrame( rdata["y"], - columns=[obs["name"] for obs in wrapper.symbols[SymbolId.OBSERVABLE].values()], + columns=[ + obs["name"] + for obs in wrapper.symbols[SymbolId.OBSERVABLE].values() + ], ) simulated["time"] = rdata["ts"] # collect parameters @@ -128,14 +136,18 @@ def verify_results(settings, rdata, expected, wrapper, model, atol, rtol): simulated[expr_id.removeprefix("flux_")] = rdata.w[:, expr_idx] # handle renamed reserved symbols simulated.rename( - columns={c: c.replace("amici_", "") for c in simulated.columns}, inplace=True + columns={c: c.replace("amici_", "") for c in simulated.columns}, + inplace=True, ) # SBML test suite case 01308 defines species with initialAmount and # hasOnlySubstanceUnits="true", but then request results as concentrations. requested_concentrations = [ s - for s in settings["concentration"].replace(" ", "").replace("\n", "").split(",") + for s in settings["concentration"] + .replace(" ", "") + .replace("\n", "") + .split(",") if s ] # We only need to convert species that have only substance units @@ -145,7 +157,8 @@ def verify_results(settings, rdata, expected, wrapper, model, atol, rtol): **wrapper.symbols[SymbolId.SPECIES], **wrapper.symbols[SymbolId.ALGEBRAIC_STATE], }.items() - if str(state_id) in requested_concentrations and state.get("amount", False) + if str(state_id) in requested_concentrations + and state.get("amount", False) ] amounts_to_concentrations( concentration_species, wrapper, simulated, requested_concentrations @@ -218,7 +231,9 @@ def concentrations_to_amounts( # Species with OnlySubstanceUnits don't have to be converted as long # as we don't request concentrations for them. Only applies when # called from amounts_to_concentrations. 
- if (is_amt and species not in requested_concentrations) or comp is None: + if ( + is_amt and species not in requested_concentrations + ) or comp is None: continue simulated.loc[:, species] *= simulated.loc[ @@ -226,7 +241,9 @@ def concentrations_to_amounts( ] -def write_result_file(simulated: pd.DataFrame, test_id: str, result_path: Path): +def write_result_file( + simulated: pd.DataFrame, test_id: str, result_path: Path +): """ Create test result file for upload to http://raterule.caltech.edu/Facilities/Database @@ -243,10 +260,14 @@ def get_amount_and_variables(settings): """Read amount and species from settings file""" # species for which results are expected as amounts - amount_species = settings["amount"].replace(" ", "").replace("\n", "").split(",") + amount_species = ( + settings["amount"].replace(" ", "").replace("\n", "").split(",") + ) # IDs of all variables for which results are expected/provided - variables = settings["variables"].replace(" ", "").replace("\n", "").split(",") + variables = ( + settings["variables"].replace(" ", "").replace("\n", "").split(",") + ) return amount_species, variables From 5e5ec618fdc53b22f3617adc2890e9c0dc241046 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Wed, 20 Sep 2023 14:54:24 +0200 Subject: [PATCH 32/32] Doc: Rename 'steadystate example' (#2174) New title + some minor updates Closes #1990 Mark notebook to always execute it under nbsphinx --- binder/overview.ipynb | 2 +- .../ExampleSteadystate.ipynb | 42 +++++++++---------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/binder/overview.ipynb b/binder/overview.ipynb index 6d98f0f9fb..0a7ce81084 100644 --- a/binder/overview.ipynb +++ b/binder/overview.ipynb @@ -12,7 +12,7 @@ "\n", " Brief intro to AMICI for first-time users.\n", "\n", - "* [Example \"steadystate\"](../python/examples/example_steadystate/ExampleSteadystate.ipynb)\n", + "* [SBML import, observation model, sensitivity analysis, data export and visualization](../python/examples/example_steadystate/ExampleSteadystate.ipynb)\n", "\n", " A more detailed introduction to the AMICI interface, demonstrating sensitivity analysis, various options, finite difference checks, ...\n", "\n", diff --git a/python/examples/example_steadystate/ExampleSteadystate.ipynb b/python/examples/example_steadystate/ExampleSteadystate.ipynb index 502174fe15..09590a3b1a 100644 --- a/python/examples/example_steadystate/ExampleSteadystate.ipynb +++ b/python/examples/example_steadystate/ExampleSteadystate.ipynb @@ -4,9 +4,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# AMICI Python example \"steadystate\"\n", + "# SBML import, observation model, sensitivity analysis, data export and visualization\n", "\n", - "This is an example using the [model_steadystate_scaled.sbml] model to demonstrate and test SBML import and AMICI Python interface." 
+ "This is an example using the [model_steadystate_scaled.sbml] model to demonstrate:\n", + "\n", + "* SBML import\n", + "* specifying the observation model\n", + "* performing sensitivity analysis\n", + "* exporting and visualizing simulation results" ] }, { @@ -23,10 +28,7 @@ "model_output_dir = model_name\n", "\n", "import libsbml\n", - "import importlib\n", "import amici\n", - "import os\n", - "import sys\n", "import numpy as np\n", "import matplotlib.pyplot as plt" ] @@ -145,7 +147,7 @@ "metadata": {}, "outputs": [], "source": [ - "constantParameters = [\"k0\"]" + "constant_parameters = [\"k0\"]" ] }, { @@ -341,7 +343,7 @@ " model_output_dir,\n", " verbose=logging.INFO,\n", " observables=observables,\n", - " constant_parameters=constantParameters,\n", + " constant_parameters=constant_parameters,\n", " sigmas=sigmas,\n", ")" ] @@ -361,8 +363,7 @@ "metadata": {}, "outputs": [], "source": [ - "sys.path.insert(0, os.path.abspath(model_output_dir))\n", - "model_module = importlib.import_module(model_name)" + "model_module = amici.import_model_module(model_name, model_output_dir)" ] }, { @@ -447,7 +448,7 @@ "print(\n", " \"Simulation was run using model default parameters as specified in the SBML model:\"\n", ")\n", - "print(model.getParameters())" + "print(dict(zip(model.getParameterIds(), model.getParameters())))" ] }, { @@ -862,18 +863,14 @@ }, { "cell_type": "code", - "execution_count": 13, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(1.0, 0.5, 0.4, 2.0, 0.1, 2.0, 3.0, 0.2)\n" - ] - } - ], + "execution_count": null, + "outputs": [], "source": [ - "print(model.getParameters())" + "# In particular for interactive use, ReturnDataView.by_id() and amici.evaluate provides a more convenient way to access slices of the result:\n", + "# Time trajectory of observable observable_x1\n", + "print(f\"{rdata.by_id('observable_x1')=}\")\n", + "# Time trajectory of state variable x2\n", + "print(f\"{rdata.by_id('x2')=}\")" ], "metadata": { "collapsed": false, @@ -2037,6 +2034,9 @@ "toc_position": {}, "toc_section_display": true, "toc_window_display": false + }, + "nbsphinx": { + "execute": "always" } }, "nbformat": 4,