diff --git a/.github/actions/install-macos-dependencies/action.yml b/.github/actions/install-macos-dependencies/action.yml new file mode 100644 index 0000000000..b19cac1052 --- /dev/null +++ b/.github/actions/install-macos-dependencies/action.yml @@ -0,0 +1,31 @@ +name: Install AMICI dependencies for MacOS +description: Install AMICI dependencies for MacOS + +runs: + using: "composite" + steps: + # use all available cores + - run: echo "AMICI_PARALLEL_COMPILE=" >> $GITHUB_ENV + shell: bash + + # AMICI repository root + - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV + shell: bash + + # BioNetGen path + - run: echo "BNGPATH=${AMICI_DIR}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV + shell: bash + + # CMake hints + # Ensure CMake is using the python version that we will use for the python tests later on + - run: echo "PYTHON_EXECUTABLE=${Python3_ROOT_DIR}/bin/python3" >> $GITHUB_ENV + shell: bash + - run: echo "OpenMP_ROOT=$(brew --prefix)/opt/libomp" >> $GITHUB_ENV + shell: bash + - run: echo "BOOST_ROOT=$(brew --prefix)/opt/boost" >> $GITHUB_ENV + shell: bash + + # install amici dependencies + - name: homebrew + run: brew install hdf5 swig gcc libomp boost + shell: bash diff --git a/.github/actions/setup-sonar-tools/action.yml b/.github/actions/setup-sonar-tools/action.yml index d791c120bf..154824c70a 100644 --- a/.github/actions/setup-sonar-tools/action.yml +++ b/.github/actions/setup-sonar-tools/action.yml @@ -16,7 +16,8 @@ runs: - name: Install sonarcloud tools run: | - sudo apt-get install nodejs curl unzip \ + sudo apt-get update \ + && sudo apt-get install nodejs curl unzip \ && curl --create-dirs -sSLo $HOME/.sonar/sonar-scanner.zip \ https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-$SONAR_SCANNER_VERSION-linux.zip \ && unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ \ diff --git a/.github/workflows/deploy_branch.yml b/.github/workflows/deploy_branch.yml index 73294286a1..82d1452d37 100644 --- 
a/.github/workflows/deploy_branch.yml +++ b/.github/workflows/deploy_branch.yml @@ -1,5 +1,5 @@ name: Deploy Branch -on: [push, merge_group, workflow_dispatch] +on: [push, pull_request, merge_group, workflow_dispatch] jobs: sdist: diff --git a/.github/workflows/test_doc.yml b/.github/workflows/test_doc.yml index 07e0afdcc8..7a60aa99d5 100644 --- a/.github/workflows/test_doc.yml +++ b/.github/workflows/test_doc.yml @@ -43,7 +43,7 @@ jobs: strategy: matrix: - python-version: [ "3.10" ] + python-version: [ "3.11" ] steps: - name: Set up Python ${{ matrix.python-version }} @@ -67,10 +67,10 @@ jobs: sudo apt-get update \ && sudo apt-get install -y \ pandoc \ - python3-venv + && pip install tox - name: Set up SWIG uses: ./.github/actions/setup-swig - name: Run sphinx - run: scripts/run-sphinx.sh + run: tox -e doc diff --git a/.github/workflows/test_install.yml b/.github/workflows/test_install.yml index be74cfa4c6..166616b434 100644 --- a/.github/workflows/test_install.yml +++ b/.github/workflows/test_install.yml @@ -1,5 +1,5 @@ name: Installation -on: [push, merge_group, workflow_dispatch] +on: [push, pull_request, merge_group, workflow_dispatch] jobs: archive: diff --git a/.github/workflows/test_matlab.yml b/.github/workflows/test_matlab.yml index d51cc3fbf6..914ce19f09 100644 --- a/.github/workflows/test_matlab.yml +++ b/.github/workflows/test_matlab.yml @@ -1,5 +1,12 @@ name: Matlab -on: [push, merge_group, workflow_dispatch] +on: + push: + merge_group: + workflow_dispatch: + pull_request: + branches: + - master + jobs: matlab: diff --git a/.github/workflows/test_petab_test_suite.yml b/.github/workflows/test_petab_test_suite.yml index d5c4dc4fe8..9b48f21d95 100644 --- a/.github/workflows/test_petab_test_suite.yml +++ b/.github/workflows/test_petab_test_suite.yml @@ -72,7 +72,7 @@ jobs: run: | source ./build/venv/bin/activate \ && pytest --cov-report=xml:coverage.xml \ - --cov=./ python/tests/test_*petab*.py + --cov=./ python/tests/test_*petab*.py python/tests/petab/ # 
run test models - name: Run PEtab test suite @@ -85,6 +85,7 @@ jobs: tests/petab_test_suite/ - name: Codecov + if: github.event_name == 'pull_request' || github.repository_owner == 'AMICI-dev' uses: codecov/codecov-action@v3.1.0 with: token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/test_pypi.yml b/.github/workflows/test_pypi.yml index 4f3533850f..68675c578a 100644 --- a/.github/workflows/test_pypi.yml +++ b/.github/workflows/test_pypi.yml @@ -11,7 +11,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.9", "3.10", "3.11"] + python-version: ["3.9", "3.10", "3.11", "3.12"] os: [ubuntu-22.04, macos-latest] runs-on: ${{ matrix.os }} diff --git a/.github/workflows/test_python_cplusplus.yml b/.github/workflows/test_python_cplusplus.yml index a531d2db2c..5aa47173b8 100644 --- a/.github/workflows/test_python_cplusplus.yml +++ b/.github/workflows/test_python_cplusplus.yml @@ -6,6 +6,7 @@ on: pull_request: branches: - master + - develop jobs: ubuntu-cpp-python-tests: @@ -65,6 +66,7 @@ jobs: ${AMICI_DIR}/python/tests/test_splines.py - name: Codecov Python + if: github.event_name == 'pull_request' || github.repository_owner == 'AMICI-dev' uses: codecov/codecov-action@v3.1.0 with: token: ${{ secrets.CODECOV_TOKEN }} @@ -84,6 +86,7 @@ jobs: && lcov -a coverage_cpp.info -a coverage_py.info -o coverage.info - name: Codecov CPP + if: github.event_name == 'pull_request' || github.repository_owner == 'AMICI-dev' uses: codecov/codecov-action@v3.1.0 with: token: ${{ secrets.CODECOV_TOKEN }} @@ -92,6 +95,7 @@ jobs: fail_ci_if_error: true - name: Run sonar-scanner + if: ${{ env.SONAR_TOKEN != '' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} @@ -137,6 +141,7 @@ jobs: ${AMICI_DIR}/python/tests/test_splines_short.py - name: Codecov Python + if: github.event_name == 'pull_request' || github.repository_owner == 'AMICI-dev' uses: codecov/codecov-action@v3.1.0 with: token: ${{ secrets.CODECOV_TOKEN }} @@ -156,6 
+161,7 @@ jobs: && lcov -a coverage_cpp.info -a coverage_py.info -o coverage.info - name: Codecov CPP + if: github.event_name == 'pull_request' || github.repository_owner == 'AMICI-dev' uses: codecov/codecov-action@v3.1.0 with: token: ${{ secrets.CODECOV_TOKEN }} @@ -164,6 +170,7 @@ jobs: fail_ci_if_error: true - name: Run sonar-scanner + if: ${{ env.SONAR_TOKEN != '' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} @@ -211,8 +218,8 @@ jobs: # TODO: Include notebooks in coverage report - osx: - name: Tests OSX + macos_cpp_py: + name: Tests MacOS C++/Python runs-on: macos-latest steps: @@ -224,16 +231,11 @@ jobs: - uses: actions/checkout@v3 - run: git fetch --prune --unshallow - - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV - - run: echo "BNGPATH=${AMICI_DIR}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV - # Ensure CMake is using the python version that we will use for the python tests later on - - run: echo "PYTHON_EXECUTABLE=${Python3_ROOT_DIR}/bin/python3" >> $GITHUB_ENV - - run: echo "OpenMP_ROOT=$(brew --prefix)/opt/libomp" >> $GITHUB_ENV - - run: echo "BOOST_ROOT=$(brew --prefix)/opt/boost" >> $GITHUB_ENV + - name: Install dependencies + uses: ./.github/actions/install-macos-dependencies - # install amici dependencies - name: homebrew - run: brew install hdf5 swig gcc cppcheck libomp boost + run: brew install cppcheck - name: Build AMICI run: scripts/buildAll.sh @@ -247,8 +249,47 @@ jobs: - name: cppcheck run: scripts/run-cppcheck.sh - - name: Python tests - run: scripts/run-python-tests.sh - - name: C++ tests run: scripts/run-cpp-tests.sh + + - name: Python tests + run: | + scripts/run-python-tests.sh \ + test_pregenerated_models.py \ + test_splines_short.py \ + test_misc.py + + + macos_python: + name: Tests MacOS Python + runs-on: macos-latest + + steps: + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.9 + + - uses: actions/checkout@v3 + - run: git fetch --prune --unshallow + + - name: 
Install dependencies + uses: ./.github/actions/install-macos-dependencies + + - name: Install python package + run: | + pip show numpy > /dev/null || python3 -m pip install numpy + scripts/installAmiciSource.sh + + - name: Check OpenMP support + run: source build/venv/bin/activate && python -c "import amici; import sys; sys.exit(not amici.compiledWithOpenMP())" + + - name: Get BioNetGen + run: scripts/buildBNGL.sh + + - name: Python tests + run: | + scripts/run-python-tests.sh \ + --ignore=test_pregenerated_models.py \ + --ignore=test_splines_short.py \ + --ignore=test_misc.py diff --git a/.github/workflows/test_python_ver_matrix.yml b/.github/workflows/test_python_ver_matrix.yml index 866a3fc0f7..9290cd0c1a 100644 --- a/.github/workflows/test_python_ver_matrix.yml +++ b/.github/workflows/test_python_ver_matrix.yml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.9', '3.10', '3.11'] + python-version: ['3.9', '3.10', '3.11', '3.12'] experimental: [false] steps: @@ -44,15 +44,27 @@ jobs: - name: Install apt dependencies uses: ./.github/actions/install-apt-dependencies - # install AMICI - name: Build BNGL run: scripts/buildBNGL.sh + - name: Install python package run: scripts/installAmiciSource.sh + # until https://github.com/dateutil/dateutil >2.8.2 is released https://github.com/dateutil/dateutil/issues/1314 + - run: source build/venv/bin/activate && pip3 install git+https://github.com/dateutil/dateutil.git@296d419fe6bf3b22897f8f210735ac9c4e1cb796 + if: matrix.python-version == '3.12' + + # install pysb before sympy to allow for sympy>=1.12 (https://github.com/pysb/pysb/commit/e83937cb8c74afc9b2fa96595b68464946745f33) + - run: source build/venv/bin/activate && pip3 install git+https://github.com/pysb/pysb + + # until sympy>1.12 is released + - run: source build/venv/bin/activate && pip3 install git+https://github.com/sympy/sympy.git@master + if: matrix.python-version == '3.12' + - name: Python tests run: | source 
build/venv/bin/activate \ - && pip3 install git+https://github.com/pysb/pysb \ - && python3 -m pytest --ignore-glob=*petab* \ + && python3 -m pytest \ + --durations=10 \ + --ignore-glob=*petab* \ --ignore-glob=*test_splines.py ${AMICI_DIR}/python/tests diff --git a/.github/workflows/test_sbml_semantic_test_suite.yml b/.github/workflows/test_sbml_semantic_test_suite.yml index 0fde56b8f9..2af34d3762 100644 --- a/.github/workflows/test_sbml_semantic_test_suite.yml +++ b/.github/workflows/test_sbml_semantic_test_suite.yml @@ -54,6 +54,7 @@ jobs: path: tests/amici-semantic-results - name: Codecov SBMLSuite + if: github.event_name == 'pull_request' || github.repository_owner == 'AMICI-dev' uses: codecov/codecov-action@v3.1.0 with: token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/test_windows.yml b/.github/workflows/test_windows.yml index 53834c3000..9dc13efbea 100644 --- a/.github/workflows/test_windows.yml +++ b/.github/workflows/test_windows.yml @@ -7,6 +7,7 @@ on: - cron: '48 4 * * *' pull_request: branches: + - develop - master jobs: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 521ff54f85..a2d00e00c1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,12 +1,6 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - name: isort (python) - args: ["--profile", "black", "--filter-files", "--line-length", "79"] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: @@ -16,15 +10,28 @@ repos: args: [--allow-multiple-documents] - id: end-of-file-fixer - id: trailing-whitespace -- repo: https://github.com/psf/black - rev: 23.7.0 +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. 
+ rev: v0.1.11 hooks: - - id: black-jupyter - # It is recommended to specify the latest version of Python - # supported by your project here, or alternatively use - # pre-commit's default_language_version, see - # https://pre-commit.com/#top_level-default_language_version - language_version: python3.11 - args: ["--line-length", "79"] + # Run the linter. + - id: ruff + args: + - --fix + - --config + - python/sdist/pyproject.toml + + # Run the formatter. + - id: ruff-format + args: + - --config + - python/sdist/pyproject.toml + +- repo: https://github.com/asottile/pyupgrade + rev: v3.15.0 + hooks: + - id: pyupgrade + args: ["--py39-plus"] + additional_dependencies: [pyupgrade==3.15.0] exclude: '^(ThirdParty|models)/' diff --git a/.readthedocs.yml b/.readthedocs.yml index 6590157647..38c2e8e41b 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -24,4 +24,4 @@ build: - libatlas-base-dev - swig tools: - python: "3.9" + python: "3.11" diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b511d92d0..513cf82efc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,93 @@ ## v0.X Series +### v0.21.0 (2024-01-16) + +**Deprecations** + +* Moved PEtab-related functionality from `amici.petab_*` to the + petab-subpackage `amici.petab.*`. The old public functions are still + available but will be removed in a future release. + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2205, + https://github.com/AMICI-dev/AMICI/pull/2211, + https://github.com/AMICI-dev/AMICI/pull/2252 + +**Features** + +* Handle events occurring at fixed timepoints without root-finding. + This avoids event-after-reinitialization errors in many cases a brings a + slight performance improvement. + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2227 +* Added `PetabProblem` class for handling PEtab-defined simulation conditions, + making it easier to perform customized operations based on PEtab-defined + simulation conditions. 
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2255 +* code-gen: Simplified `switch` statements, leading to reduced file sizes and + faster compilation for certain models. + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2240 +* Made `Model` and `ModelPtr` deepcopyable + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2247 +* Made `Solver` and `SolverPtr` deepcopyable + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2245 +* Added a debugging helper `get_model_for_preeq` for debugging simulation + issues during pre-equilibration. + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2250 +* Added `SwigPtrView` fields to `dir()` outputs + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2244 +* Use proper labels for in plotting functions if IDs are available in + `ReturnData`. + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2249 +* Added `ExpData::clear_observations` to set all measurements/sigmas to NaN + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2258 + +**Fixes** + +* Fixed AMICI hiding all warnings. Previously, importing `amici` resulted + in all warnings being hidden in the rest of the program. 
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2243 +* CMake: Fixed model debug builds + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2222 +* Fixed CMake potentially using incorrect Python library for building model + extension + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2220 +* CMake: fixed cxx flag check + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2225 +* Fixed potential out-of-bounds read in `Model::checkFinite` for + matlab-imported models + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2232 +* Fixed piecewise/Heaviside handling + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2234 +* Deterministic order of event assignments + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2242 +* Proper error message in case of unsupported state-dependent sigmas + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2239 +* Fixed swig shadow warning + other linting issues + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2261 +* Fixed `SwigPtrView.__getattr__` + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2259 +* `simulate_petab`: Avoid warning when simulating with default parameters + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2265 + +**Documentation** + +* Updated Python package installation instructions for Arch Linux + by @willov in https://github.com/AMICI-dev/AMICI/pull/2212 +* Updated `ExpData` documentation + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2254 +* Documented simulation starting time `t0` + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2263 +* Updated PEtab example + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2255 + +... 
+ +**Full Changelog**: https://github.com/AMICI-dev/AMICI/compare/v0.20.0...v0.21.0 + ### v0.20.0 (2023-11-23) **Fixes** -* Fixed CMake cmake_minimum_required deprecation warning +* Fixed CMake `cmake_minimum_required` deprecation warning by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2183 * Fixed misleading preequilibration failure messages by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2181 diff --git a/ThirdParty/gsl/gsl/gsl-lite.hpp b/ThirdParty/gsl/gsl/gsl-lite.hpp index 380fb2f34b..f8e46ae417 100644 --- a/ThirdParty/gsl/gsl/gsl-lite.hpp +++ b/ThirdParty/gsl/gsl/gsl-lite.hpp @@ -3,7 +3,7 @@ // For more information see https://github.com/gsl-lite/gsl-lite // // Copyright (c) 2015-2019 Martin Moene -// Copyright (c) 2019-2021 Moritz Beutel +// Copyright (c) 2019-2023 Moritz Beutel // Copyright (c) 2015-2018 Microsoft Corporation. All rights reserved. // // This code is licensed under the MIT License (MIT). @@ -31,7 +31,7 @@ #include // for abort() #define gsl_lite_MAJOR 0 -#define gsl_lite_MINOR 40 +#define gsl_lite_MINOR 41 #define gsl_lite_PATCH 0 #define gsl_lite_VERSION gsl_STRINGIFY(gsl_lite_MAJOR) "." gsl_STRINGIFY(gsl_lite_MINOR) "." gsl_STRINGIFY(gsl_lite_PATCH) @@ -183,6 +183,15 @@ #endif #define gsl_FEATURE_OWNER_MACRO_() gsl_FEATURE_OWNER_MACRO +//#if defined( gsl_FEATURE_STRING_SPAN ) +//# if ! gsl_CHECK_CFG_TOGGLE_VALUE_( gsl_FEATURE_STRING_SPAN ) +//# pragma message ("invalid configuration value gsl_FEATURE_STRING_SPAN=" gsl_STRINGIFY(gsl_FEATURE_STRING_SPAN) ", must be 0 or 1") +//# endif +//#else +//# define gsl_FEATURE_STRING_SPAN (gsl_CONFIG_DEFAULTS_VERSION == 0) // default +//#endif +//#define gsl_FEATURE_STRING_SPAN_() gsl_FEATURE_STRING_SPAN + #if defined( gsl_FEATURE_EXPERIMENTAL_RETURN_GUARD ) # if ! 
gsl_CHECK_CFG_TOGGLE_VALUE_( gsl_FEATURE_EXPERIMENTAL_RETURN_GUARD ) # pragma message ("invalid configuration value gsl_FEATURE_EXPERIMENTAL_RETURN_GUARD=" gsl_STRINGIFY(gsl_FEATURE_EXPERIMENTAL_RETURN_GUARD) ", must be 0 or 1") @@ -217,7 +226,7 @@ #if ! defined( gsl_CONFIG_DEPRECATE_TO_LEVEL ) # if gsl_CONFIG_DEFAULTS_VERSION >= 1 -# define gsl_CONFIG_DEPRECATE_TO_LEVEL 6 +# define gsl_CONFIG_DEPRECATE_TO_LEVEL 7 # else # define gsl_CONFIG_DEPRECATE_TO_LEVEL 0 # endif @@ -301,6 +310,15 @@ #endif #define gsl_CONFIG_NARROW_THROWS_ON_TRUNCATION_() gsl_CONFIG_NARROW_THROWS_ON_TRUNCATION +#if defined( gsl_CONFIG_VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS ) +# if ! gsl_CHECK_CFG_TOGGLE_VALUE_( gsl_CONFIG_VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS ) +# pragma message ("invalid configuration value gsl_CONFIG_VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS=" gsl_STRINGIFY(gsl_CONFIG_VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS) ", must be 0 or 1") +# endif +#else +# define gsl_CONFIG_VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS 1 // default +#endif +#define gsl_CONFIG_VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS_() gsl_CONFIG_VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS + #if defined( gsl_CONFIG_CONTRACT_CHECKING_EXPECTS_OFF ) # if ! gsl_CHECK_CFG_NO_VALUE_( gsl_CONFIG_CONTRACT_CHECKING_EXPECTS_OFF ) # pragma message ("invalid configuration value gsl_CONFIG_CONTRACT_CHECKING_EXPECTS_OFF=" gsl_STRINGIFY(gsl_CONFIG_CONTRACT_CHECKING_EXPECTS_OFF) "; macro must be defined without value") @@ -474,7 +492,7 @@ # endif #endif -// C++ language version detection (C++20 is speculative): +// C++ language version detection (C++23 is speculative): // Note: VC14.0/1900 (VS2015) lacks too much from C++14. 
#ifndef gsl_CPLUSPLUS @@ -495,7 +513,8 @@ #define gsl_CPP11_OR_GREATER ( gsl_CPLUSPLUS >= 201103L ) #define gsl_CPP14_OR_GREATER ( gsl_CPLUSPLUS >= 201402L ) #define gsl_CPP17_OR_GREATER ( gsl_CPLUSPLUS >= 201703L ) -#define gsl_CPP20_OR_GREATER ( gsl_CPLUSPLUS >= 202000L ) +#define gsl_CPP20_OR_GREATER ( gsl_CPLUSPLUS >= 202002L ) +#define gsl_CPP23_OR_GREATER ( gsl_CPLUSPLUS > 202002L ) // tentative // C++ language version (represent 98 as 3): @@ -517,6 +536,7 @@ // MSVC++ 14.0 _MSC_VER == 1900 gsl_COMPILER_MSVC_VERSION == 140 (Visual Studio 2015) // MSVC++ 14.1 _MSC_VER >= 1910 gsl_COMPILER_MSVC_VERSION == 141 (Visual Studio 2017) // MSVC++ 14.2 _MSC_VER >= 1920 gsl_COMPILER_MSVC_VERSION == 142 (Visual Studio 2019) +// MSVC++ 14.3 _MSC_VER >= 1930 gsl_COMPILER_MSVC_VERSION == 143 (Visual Studio 2022) #if defined( _MSC_VER ) && ! defined( __clang__ ) # define gsl_COMPILER_MSVC_VER (_MSC_VER ) @@ -551,7 +571,10 @@ // AppleClang 11.0.3 __apple_build_version__ == 11030032 gsl_COMPILER_APPLECLANG_VERSION == 1103 (Xcode 11.4, 11.4.1, 11.5, 11.6) (LLVM 9.0.0) // AppleClang 12.0.0 __apple_build_version__ == 12000032 gsl_COMPILER_APPLECLANG_VERSION == 1200 (Xcode 12.0–12.4) (LLVM 10.0.0) // AppleClang 12.0.5 __apple_build_version__ == 12050022 gsl_COMPILER_APPLECLANG_VERSION == 1205 (Xcode 12.5) (LLVM 11.1.0) -// AppleClang 13.0.0 __apple_build_version__ == 13000029 gsl_COMPILER_APPLECLANG_VERSION == 1300 (Xcode 13.0) (LLVM 12.0.0) +// AppleClang 13.0.0 __apple_build_version__ == 13000029 gsl_COMPILER_APPLECLANG_VERSION == 1300 (Xcode 13.0–13.2.1) (LLVM 12.0.0) +// AppleClang 13.1.6 __apple_build_version__ == 13160021 gsl_COMPILER_APPLECLANG_VERSION == 1316 (Xcode 13.3–13.4.1) (LLVM 13.0.0) +// AppleClang 14.0.0 __apple_build_version__ == 14000029 gsl_COMPILER_APPLECLANG_VERSION == 1400 (Xcode 14.0–14.2) (LLVM 14.0.0) +// AppleClang 14.0.3 __apple_build_version__ == 14030022 gsl_COMPILER_APPLECLANG_VERSION == 1403 (Xcode 14.3) (LLVM 15.0.0) #if defined( 
__apple_build_version__ ) # define gsl_COMPILER_APPLECLANG_VERSION gsl_COMPILER_VERSION( __clang_major__, __clang_minor__, __clang_patchlevel__ ) @@ -611,7 +634,7 @@ // Presence of wide character support: -#ifdef __DJGPP__ +#if defined(__DJGPP__) || (defined(_LIBCPP_VERSION) && defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)) # define gsl_HAVE_WCHAR 0 #else # define gsl_HAVE_WCHAR 1 @@ -768,6 +791,12 @@ #define gsl_HAVE_CONSTEXPR_20_() gsl_HAVE_CONSTEXPR_20 +// Presence of C++23 language features: + +#define gsl_HAVE_CONSTEXPR_23 gsl_CPP23_OR_GREATER + +#define gsl_HAVE_CONSTEXPR_23_() gsl_HAVE_CONSTEXPR_23 + // Presence of C++ library features: #if gsl_BETWEEN( gsl_COMPILER_ARMCC_VERSION, 1, 600 ) @@ -777,12 +806,14 @@ # define gsl_STDLIB_CPP14_OR_GREATER 0 # define gsl_STDLIB_CPP17_OR_GREATER 0 # define gsl_STDLIB_CPP20_OR_GREATER 0 +# define gsl_STDLIB_CPP23_OR_GREATER 0 #else # define gsl_STDLIB_CPP98_OR_GREATER gsl_CPP98_OR_GREATER # define gsl_STDLIB_CPP11_OR_GREATER gsl_CPP11_OR_GREATER # define gsl_STDLIB_CPP14_OR_GREATER gsl_CPP14_OR_GREATER # define gsl_STDLIB_CPP17_OR_GREATER gsl_CPP17_OR_GREATER # define gsl_STDLIB_CPP20_OR_GREATER gsl_CPP20_OR_GREATER +# define gsl_STDLIB_CPP23_OR_GREATER gsl_CPP23_OR_GREATER #endif #define gsl_STDLIB_CPP11_100 (gsl_STDLIB_CPP11_OR_GREATER || gsl_COMPILER_MSVC_VER >= 1600) @@ -890,6 +921,12 @@ # define gsl_constexpr20 /*constexpr*/ #endif +#if gsl_HAVE( CONSTEXPR_23 ) +# define gsl_constexpr23 constexpr +#else +# define gsl_constexpr23 /*constexpr*/ +#endif + #if gsl_HAVE( EXPLICIT ) # define gsl_explicit explicit #else @@ -1180,7 +1217,7 @@ namespace __cxxabiv1 { struct __cxa_eh_globals; extern "C" __cxa_eh_globals * __ #endif // gsl_FEATURE( EXPERIMENTAL_RETURN_GUARD ) -// MSVC warning suppression macros: +// Warning suppression macros: #if gsl_COMPILER_MSVC_VERSION >= 140 && ! 
gsl_COMPILER_NVCC_VERSION # define gsl_SUPPRESS_MSGSL_WARNING(expr) [[gsl::suppress(expr)]] @@ -1195,6 +1232,18 @@ namespace __cxxabiv1 { struct __cxa_eh_globals; extern "C" __cxa_eh_globals * __ # define gsl_RESTORE_MSVC_WARNINGS() #endif +// Warning suppressions: + +#if gsl_COMPILER_CLANG_VERSION || gsl_COMPILER_APPLECLANG_VERSION +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wweak-vtables" // because of `fail_fast` and `narrowing_error` +#endif // gsl_COMPILER_CLANG_VERSION || gsl_COMPILER_APPLECLANG_VERSION + +#if gsl_COMPILER_GNUC_VERSION +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wuseless-cast" // we use `static_cast<>()` in several places where it is possibly redundant depending on the configuration of the library +#endif // gsl_COMPILER_GNUC_VERSION + // Suppress the following MSVC GSL warnings: // - C26432: gsl::c.21 : if you define or delete any default operation in the type '...', define or delete them all // - C26410: gsl::r.32 : the parameter 'ptr' is a reference to const unique pointer, use const T* or const T& instead @@ -1214,6 +1263,12 @@ namespace __cxxabiv1 { struct __cxa_eh_globals; extern "C" __cxa_eh_globals * __ // - C26457: es.48 : (void) should not be used to ignore return values, use 'std::ignore =' instead gsl_DISABLE_MSVC_WARNINGS( 26432 26410 26415 26418 26472 26439 26440 26455 26473 26481 26482 26446 26490 26487 26457 ) +#if gsl_BETWEEN( gsl_COMPILER_MSVC_VERSION, 110, 140 ) // VS 2012 and 2013 +# pragma warning(disable: 4127) // conditional expression is constant +#endif // gsl_BETWEEN( gsl_COMPILER_MSVC_VERSION, 110, 140 ) +#if gsl_COMPILER_MSVC_VERSION == 140 // VS 2015 +# pragma warning(disable: 4577) // 'noexcept' used with no exception handling mode specified; termination on exception is not guaranteed. 
Specify /EHsc +#endif // gsl_COMPILER_MSVC_VERSION == 140 namespace gsl { @@ -1558,6 +1613,12 @@ template< class T > struct remove_cvref { typedef typename std11::remove_cv< typ } // namespace std20 +// C++23 emulation: + +namespace std23 { + +} // namespace std23 + namespace detail { /// for gsl_ENABLE_IF_() @@ -1723,7 +1784,7 @@ typedef gsl_CONFIG_INDEX_TYPE diff; // GSL.assert: assertions // -#if gsl_HAVE( TYPE_TRAITS ) +#if gsl_HAVE( TYPE_TRAITS ) && gsl_CONFIG( VALIDATES_UNENFORCED_CONTRACT_EXPRESSIONS ) # define gsl_ELIDE_( x ) static_assert( ::std::is_constructible::value, "argument of contract check must be convertible to bool" ) #else # define gsl_ELIDE_( x ) @@ -1742,7 +1803,7 @@ typedef gsl_CONFIG_INDEX_TYPE diff; #if gsl_DEVICE_CODE # if defined( gsl_CONFIG_DEVICE_UNENFORCED_CONTRACTS_ASSUME ) # if gsl_COMPILER_NVCC_VERSION >= 113 -# define gsl_ASSUME_( x ) ( ( x ) ? static_cast(0) : __builtin_unreachable() ) +# define gsl_ASSUME_( x ) ( __builtin_assume( !!( x ) ) ) # define gsl_ASSUME_UNREACHABLE_() __builtin_unreachable() # else // unknown device compiler # error gsl_CONFIG_DEVICE_UNENFORCED_CONTRACTS_ASSUME: gsl-lite does not know how to generate UB optimization hints in device code for this compiler; use gsl_CONFIG_DEVICE_UNENFORCED_CONTRACTS_ELIDE instead @@ -2340,7 +2401,7 @@ namespace detail { #endif template< class T, class U > -gsl_NODISCARD +gsl_NODISCARD gsl_constexpr14 #if !gsl_CONFIG( NARROW_THROWS_ON_TRUNCATION ) && !defined( gsl_CONFIG_CONTRACT_VIOLATION_THROWS ) gsl_api #endif @@ -2398,7 +2459,7 @@ narrow( U u ) } template< class T, class U > -gsl_NODISCARD gsl_api inline T +gsl_NODISCARD gsl_api gsl_constexpr14 inline T narrow_failfast( U u ) { T t = static_cast( u ); @@ -3261,8 +3322,8 @@ class not_null_ic : public not_null > gsl_api gsl_constexpr14 #if gsl_HAVE( MOVE_FORWARD ) - not_null_ic( U && u ) - : not_null( std::forward( u ) ) + not_null_ic( U u ) + : not_null( std::move( u ) ) #else // ! 
gsl_HAVE( MOVE_FORWARD ) not_null_ic( U const & u ) : not_null( u ) @@ -4393,6 +4454,7 @@ byte_span( T const & t ) gsl_noexcept #endif // gsl_FEATURE_TO_STD( BYTE_SPAN ) +//#if gsl_FEATURE( STRING_SPAN ) // // basic_string_span: // @@ -4465,19 +4527,31 @@ class basic_string_span #ifdef __CUDACC_RELAXED_CONSTEXPR__ gsl_api #endif // __CUDACC_RELAXED_CONSTEXPR__ +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( pointer ptr ) : span_( remove_z( ptr, (std::numeric_limits::max)() ) ) {} +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_api gsl_constexpr basic_string_span( pointer ptr, index_type count ) : span_( ptr, count ) {} +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_api gsl_constexpr basic_string_span( pointer firstElem, pointer lastElem ) : span_( firstElem, lastElem ) {} template< std::size_t N > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( element_type (&arr)[N] ) : span_( remove_z( gsl_ADDRESSOF( arr[0] ), N ) ) {} @@ -4485,11 +4559,17 @@ class basic_string_span #if gsl_HAVE( ARRAY ) template< std::size_t N > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( std::array< typename std11::remove_const::type, N> & arr ) : span_( remove_z( arr ) ) {} template< std::size_t N > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr 
basic_string_span( std::array< typename std11::remove_const::type, N> const & arr ) : span_( remove_z( arr ) ) {} @@ -4508,6 +4588,9 @@ class basic_string_span && std::is_convertible< typename Container::pointer, decltype(std::declval().data()) >::value )) > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( Container & cont ) : span_( ( cont ) ) {} @@ -4522,6 +4605,9 @@ class basic_string_span && std::is_convertible< typename Container::pointer, decltype(std::declval().data()) >::value )) > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( Container const & cont ) : span_( ( cont ) ) {} @@ -4529,11 +4615,17 @@ class basic_string_span #elif gsl_HAVE( UNCONSTRAINED_SPAN_CONTAINER_CTOR ) template< class Container > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( Container & cont ) : span_( cont ) {} template< class Container > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( Container const & cont ) : span_( cont ) {} @@ -4541,6 +4633,9 @@ class basic_string_span #else template< class U > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_api gsl_constexpr basic_string_span( span const & rhs ) : span_( rhs ) {} @@ -4550,6 +4645,9 @@ class basic_string_span #if gsl_FEATURE_TO_STD( WITH_CONTAINER ) template< class Container > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<> is deprecated; use span<> instead") 
+#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( with_container_t, Container & cont ) : span_( with_container, cont ) {} @@ -4570,6 +4668,9 @@ class basic_string_span template< class U gsl_ENABLE_IF_(( std::is_convertible::pointer, pointer>::value )) > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_api gsl_constexpr basic_string_span( basic_string_span const & rhs ) : span_( reinterpret_cast( rhs.data() ), rhs.length() ) // NOLINT {} @@ -4578,18 +4679,27 @@ class basic_string_span template< class U gsl_ENABLE_IF_(( std::is_convertible::pointer, pointer>::value )) > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_api gsl_constexpr basic_string_span( basic_string_span && rhs ) : span_( reinterpret_cast( rhs.data() ), rhs.length() ) // NOLINT {} #endif // gsl_STDLIB_CPP11_120 template< class CharTraits, class Allocator > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( std::basic_string< typename std11::remove_const::type, CharTraits, Allocator > & str ) : span_( gsl_ADDRESSOF( str[0] ), str.length() ) {} template< class CharTraits, class Allocator > +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_string_span<><> is deprecated; use span<> instead") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_constexpr basic_string_span( std::basic_string< typename std11::remove_const::type, CharTraits, Allocator > const & str ) : span_( gsl_ADDRESSOF( str[0] ), str.length() ) @@ -4933,6 +5043,7 @@ as_bytes( basic_string_span spn ) gsl_noexcept { return span< const byte >( reinterpret_cast( spn.data() ), spn.size_bytes() ); // NOLINT } +//#endif // gsl_FEATURE( STRING_SPAN ) // // String types: @@ -4946,6 
+5057,8 @@ typedef wchar_t * wzstring; typedef const wchar_t * cwzstring; #endif +//#if gsl_FEATURE( STRING_SPAN ) + typedef basic_string_span< char > string_span; typedef basic_string_span< char const > cstring_span; @@ -5066,6 +5179,7 @@ std::basic_ostream< wchar_t, Traits > & operator<<( std::basic_ostream< wchar_t, } #endif // gsl_HAVE( WCHAR ) +//#endif // gsl_FEATURE( STRING_SPAN ) // // ensure_sentinel() @@ -5124,6 +5238,7 @@ ensure_z( Container & cont ) } # endif +//#if gsl_FEATURE( STRING_SPAN ) // // basic_zstring_span<> - A view of contiguous null-terminated characters, replace (*,len). // @@ -5141,6 +5256,9 @@ class basic_zstring_span typedef element_type * czstring_type; typedef basic_string_span string_span_type; +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_zstring_span<> is deprecated") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_api gsl_constexpr14 basic_zstring_span( span_type s ) : span_( s ) { @@ -5164,13 +5282,19 @@ class basic_zstring_span return false; } +#if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_zstring_span<> is deprecated") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) gsl_NODISCARD gsl_api gsl_constexpr string_span_type as_string_span() const gsl_noexcept { return string_span_type( span_.data(), span_.size() - 1 ); } - /*gsl_api*/ // currently disabled due to an apparent NVCC bug + #if gsl_DEPRECATE_TO_LEVEL( 7 ) + gsl_DEPRECATED_MSG("basic_zstring_span<> is deprecated") +#endif // gsl_DEPRECATE_TO_LEVEL( 7 ) + /*gsl_api*/ // currently disabled due to an apparent NVCC bug gsl_NODISCARD gsl_constexpr string_span_type ensure_z() const { @@ -5198,6 +5322,7 @@ typedef basic_zstring_span< char const > czstring_span; typedef basic_zstring_span< wchar_t > wzstring_span; typedef basic_zstring_span< wchar_t const > cwzstring_span; #endif +//#endif // gsl_FEATURE( STRING_SPAN ) } // namespace gsl @@ -5306,11 +5431,13 @@ namespace std11 = ::gsl::std11; namespace std14 = ::gsl::std14; namespace std17 = ::gsl::std17; namespace 
std20 = ::gsl::std20; +namespace std23 = ::gsl::std23; using namespace std11; //using namespace std14; // contains only make_unique<>(), which is superseded by `gsl::make_unique<>()` using namespace std17; using namespace std20; +using namespace std23; using namespace ::gsl::detail::no_adl; @@ -5380,6 +5507,7 @@ using ::gsl::as_writable_bytes; using ::gsl::as_writeable_bytes; # endif +//# if gsl_FEATURE( STRING_SPAN ) using ::gsl::basic_string_span; using ::gsl::string_span; using ::gsl::cstring_span; @@ -5387,6 +5515,7 @@ using ::gsl::cstring_span; using ::gsl::basic_zstring_span; using ::gsl::zstring_span; using ::gsl::czstring_span; +//# endif // gsl_FEATURE( STRING_SPAN ) using ::gsl::zstring; using ::gsl::czstring; @@ -5395,8 +5524,10 @@ using ::gsl::czstring; using ::gsl::wzstring; using ::gsl::cwzstring; +//# if gsl_FEATURE( STRING_SPAN ) using ::gsl::wzstring_span; using ::gsl::cwzstring_span; +//# endif // gsl_FEATURE( STRING_SPAN ) # endif // gsl_HAVE( WCHAR ) using ::gsl::ensure_z; @@ -5406,6 +5537,12 @@ using ::gsl::ensure_z; #endif // gsl_FEATURE( GSL_LITE_NAMESPACE ) gsl_RESTORE_MSVC_WARNINGS() +#if gsl_COMPILER_CLANG_VERSION || gsl_COMPILER_APPLECLANG_VERSION +# pragma clang diagnostic pop +#endif // gsl_COMPILER_CLANG_VERSION || gsl_COMPILER_APPLECLANG_VERSION +#if gsl_COMPILER_GNUC_VERSION +# pragma GCC diagnostic pop +#endif // gsl_COMPILER_GNUC_VERSION // #undef internal macros #undef gsl_STATIC_ASSERT_ diff --git a/documentation/ExampleJax.ipynb b/documentation/ExampleJax.ipynb index 1899305b67..c9fbb589e5 100644 --- a/documentation/ExampleJax.ipynb +++ b/documentation/ExampleJax.ipynb @@ -46,10 +46,10 @@ "output_type": "stream", "text": [ "Cloning into 'tmp/benchmark-models'...\n", - "remote: Enumerating objects: 336, done.\u001b[K\n", - "remote: Counting objects: 100% (336/336), done.\u001b[K\n", - "remote: Compressing objects: 100% (285/285), done.\u001b[K\n", - "remote: Total 336 (delta 88), reused 216 (delta 39), pack-reused 0\u001b[K\n", + 
"remote: Enumerating objects: 336, done.\u001B[K\n", + "remote: Counting objects: 100% (336/336), done.\u001B[K\n", + "remote: Compressing objects: 100% (285/285), done.\u001B[K\n", + "remote: Total 336 (delta 88), reused 216 (delta 39), pack-reused 0\u001B[K\n", "Receiving objects: 100% (336/336), 2.11 MiB | 7.48 MiB/s, done.\n", "Resolving deltas: 100% (88/88), done.\n" ] @@ -557,8 +557,7 @@ "clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX13.sdk -I/Users/fabian/Documents/projects/AMICI/documentation/amici_models/Boehm_JProteomeRes2014 -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/include -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/ThirdParty/gsl -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/ThirdParty/sundials/include -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/ThirdParty/SuiteSparse/include -I/opt/homebrew/Cellar/hdf5/1.12.2_2/include -I/Users/fabian/Documents/projects/AMICI/build/venv/include -I/opt/homebrew/opt/python@3.10/Frameworks/Python.framework/Versions/3.10/include/python3.10 -c swig/Boehm_JProteomeRes2014_wrap.cpp -o build/temp.macosx-13-arm64-cpython-310/swig/Boehm_JProteomeRes2014_wrap.o -std=c++14\n", "clang -Wno-unused-result -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX13.sdk -I/Users/fabian/Documents/projects/AMICI/documentation/amici_models/Boehm_JProteomeRes2014 -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/include -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/ThirdParty/gsl -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/ThirdParty/sundials/include -I/Users/fabian/Documents/projects/AMICI/python/sdist/amici/ThirdParty/SuiteSparse/include -I/opt/homebrew/Cellar/hdf5/1.12.2_2/include 
-I/Users/fabian/Documents/projects/AMICI/build/venv/include -I/opt/homebrew/opt/python@3.10/Frameworks/Python.framework/Versions/3.10/include/python3.10 -c wrapfunctions.cpp -o build/temp.macosx-13-arm64-cpython-310/wrapfunctions.o -std=c++14\n", "clang++ -bundle -undefined dynamic_lookup -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX13.sdk build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_Jy.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dJydsigma.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dJydy.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dJydy_colptrs.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dJydy_rowvals.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dsigmaydp.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdp.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdp_colptrs.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdp_rowvals.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdw.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdw_colptrs.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdw_rowvals.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdx.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdx_colptrs.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dwdx_rowvals.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dxdotdw.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dxdotdw_colptrs.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dxdotdw_rowvals.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_dydx.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_sigmay.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_sx0_fixedParameters.o 
build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_w.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_x0.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_x0_fixedParameters.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_x_rdata.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_x_solver.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_xdot.o build/temp.macosx-13-arm64-cpython-310/Boehm_JProteomeRes2014_y.o build/temp.macosx-13-arm64-cpython-310/swig/Boehm_JProteomeRes2014_wrap.o build/temp.macosx-13-arm64-cpython-310/wrapfunctions.o -L/opt/homebrew/Cellar/hdf5/1.12.2_2/lib -L/Users/fabian/Documents/projects/AMICI/python/sdist/amici/libs -lamici -lsundials -lsuitesparse -lcblas -lhdf5_hl_cpp -lhdf5_hl -lhdf5_cpp -lhdf5 -o /Users/fabian/Documents/projects/AMICI/documentation/amici_models/Boehm_JProteomeRes2014/Boehm_JProteomeRes2014/_Boehm_JProteomeRes2014.cpython-310-darwin.so\n", - "ld: warning: -undefined dynamic_lookup may not work with chained fixups\n", - "\n" + "ld: warning: -undefined dynamic_lookup may not work with chained fixups\n" ] }, { @@ -571,7 +570,7 @@ } ], "source": [ - "from amici.petab_import import import_petab_problem\n", + "from amici.petab.petab_import import import_petab_problem\n", "\n", "amici_model = import_petab_problem(petab_problem, force_compile=True)" ] @@ -589,7 +588,7 @@ "id": "e2ef051a", "metadata": {}, "source": [ - "For full jax support, we would have to implement a new [primitive](https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html), which would require quite a bit of engineering, and in the end wouldn't add much benefit since AMICI can't run on GPUs. Instead will interface AMICI using the experimental jax module [`host_callback`](https://jax.readthedocs.io/en/latest/jax.experimental.host_callback.html). 
" + "For full jax support, we would have to implement a new [primitive](https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html), which would require quite a bit of engineering, and in the end wouldn't add much benefit since AMICI can't run on GPUs. Instead, we will interface AMICI using the experimental jax module [`host_callback`](https://jax.readthedocs.io/en/latest/jax.experimental.host_callback.html). " ] }, { @@ -607,7 +606,7 @@ "metadata": {}, "outputs": [], "source": [ - "from amici.petab_objective import simulate_petab\n", + "from amici.petab.simulations import simulate_petab\n", "import amici\n", "\n", "amici_solver = amici_model.getSolver()\n", @@ -655,7 +654,7 @@ "id": "98e819bd", "metadata": {}, "source": [ - "Now we can finally define the JAX function that runs amici simulation using the host callback. We add a `custom_jvp` decorater so that we can define a custom jacobian vector product function in the next step. More details about custom jacobian vector product functions can be found in the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html)" + "Now we can finally define the JAX function that runs amici simulation using the host callback. We add a `custom_jvp` decorator so that we can define a custom jacobian vector product function in the next step. More details about custom jacobian vector product functions can be found in the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html)" ] }, { @@ -937,7 +936,7 @@ "metadata": {}, "source": [ "We see quite some differences in the gradient calculation. 
The primary reason is that running JAX in default configuration will use float32 precision for the parameters that are passed to AMICI, which uses float64, and the derivative of the parameter transformation \n", - "As AMICI simulations that run on the CPU are the most expensive operation, there is barely any tradeoff for using float32 vs float64 in JAX. Therefore we configure JAX to use float64 instead and rerun simulations." + "As AMICI simulations that run on the CPU are the most expensive operation, there is barely any tradeoff for using float32 vs float64 in JAX. Therefore, we configure JAX to use float64 instead and rerun simulations." ] }, { diff --git a/documentation/GettingStarted.ipynb b/documentation/GettingStarted.ipynb index 91fb9cb12c..1bacf00bef 100644 --- a/documentation/GettingStarted.ipynb +++ b/documentation/GettingStarted.ipynb @@ -14,7 +14,7 @@ "metadata": {}, "source": [ "## Model Compilation\n", - "Before simulations can be run, the model must be imported and compiled. In this process, AMICI performs all symbolic manipulations that later enable scalable simulations and efficient sensitivity computation. The first step towards model compilation is the creation of an [SbmlImporter](https://amici.readthedocs.io/en/latest/generated/amici.sbml_import.SbmlImporter.html) instance, which requires an SBML Document that specifies the model using the [Systems Biology Markup Language (SBML)](http://sbml.org/Main_Page). \n", + "Before simulations can be run, the model must be imported and compiled. In this process, AMICI performs all symbolic manipulations that later enable scalable simulations and efficient sensitivity computation. The first step towards model compilation is the creation of an [SbmlImporter](https://amici.readthedocs.io/en/latest/generated/amici.sbml_import.SbmlImporter.html) instance, which requires an SBML Document that specifies the model using the [Systems Biology Markup Language (SBML)](https://sbml.org/). 
\n", "\n", "For the purpose of this tutorial, we will use `model_steadystate_scaled.xml`, which is contained in the same directory as this notebook." ] @@ -113,7 +113,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Model simulations can be executed using the [amici.runAmiciSimulations](https://amici.readthedocs.io/en/latest/generated/amici.html#amici.runAmiciSimulation) routine. By default the model does not not contain any timepoints for which the model is to be simulated. Here we define a simulation timecourse with two timepoints at `0` and `1` and then run the simulation." + "Model simulations can be executed using the [amici.runAmiciSimulations](https://amici.readthedocs.io/en/latest/generated/amici.html#amici.runAmiciSimulation) routine. By default, the model does not contain any timepoints for which the model is to be simulated. Here we define a simulation timecourse with two timepoints at `0` and `1` and then run the simulation." ] }, { diff --git a/documentation/README.md b/documentation/README.md index af9f33320e..eece1de90e 100644 --- a/documentation/README.md +++ b/documentation/README.md @@ -12,7 +12,7 @@ The legacy GitHub Pages URL https://amici-dev.github.io/AMICI/ is set up as a redirect to RTD. The main configuration file is `documentation/conf.py` and the documentation -is generated using `scripts/run-sphinx.sh`. The documentation is written to +is generated using `tox -e doc`. The documentation is written to `documentation/_build/`. The documentation comprises: @@ -50,12 +50,6 @@ Matlab documentation is processed by [mtoc++](https://www.morepas.org/software/mtocpp/docs/tools.html). This is configured in `matlab/mtoc/config`. -#### Python documentation - -Python documentation is processed by doxygen and doxypypy using the script and -filters in `scripts/`. 
- - ## Writing documentation ### Out-of-source documentation diff --git a/documentation/conf.py b/documentation/conf.py index ba88b25a8d..8b2379a299 100644 --- a/documentation/conf.py +++ b/documentation/conf.py @@ -1,22 +1,22 @@ -# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config - import os import re import subprocess import sys +from enum import EnumType # need to import before setting typing.TYPE_CHECKING=True, fails otherwise import amici import exhale.deploy import exhale_multiproject_monkeypatch -import mock +from unittest import mock import pandas as pd +import sphinx import sympy as sp from exhale import configs as exhale_configs from sphinx.transforms.post_transforms import ReferencesResolver @@ -254,6 +254,7 @@ def install_doxygen(): autodoc_default_options = { "special-members": "__init__", "inherited-members": True, + "undoc-members": True, } # sphinx-autodoc-typehints @@ -567,7 +568,8 @@ def process_signature( return # only apply in the amici.amici module - if name.split(".")[1] != "amici": + split_name = name.split(".") + if len(split_name) < 2 or split_name[1] != "amici": return signature = fix_typehints(signature) @@ -605,7 +607,7 @@ def process_missing_ref(app, env, node, contnode): def skip_member(app, what, name, obj, skip, options): - ignored = [ + ignored_names = { "AbstractModel", "CVodeSolver", "IDASolver", @@ -614,7 +616,6 @@ def skip_member(app, what, name, obj, skip, options): "ConditionContext", "checkSigmaPositivity", "createGroup", - "createGroup", "equals", "printErrMsgIdAndTxt", "wrapErrHandlerFn", @@ -639,24 +640,45 @@ def skip_member(app, what, name, obj, skip, options): "stdVec2ndarray", "SwigPyIterator", "thisown", - ] + } - if name in ignored: + if name in ignored_names: return True if name.startswith("_") and name != "__init__": return 
True + obj_str = str(obj) + # ignore various functions for std::vector<> types - if re.match(r"^ python/sdist/amici/amici.py:docstring of amici.amici.FixedParameterContext.from_bytes:9: + # WARNING: Inline interpreted text or phrase reference start-string without end-string. + if ( + (qualname := getattr(obj, "__qualname__", "")) + and qualname == "int.to_bytes" + ) or ( + isinstance(getattr(obj, "__self__", None), EnumType) + and name == "from_bytes" + ): return True return None diff --git a/documentation/python_installation.rst b/documentation/python_installation.rst index 6cc3402e61..dee9bfd4fc 100644 --- a/documentation/python_installation.rst +++ b/documentation/python_installation.rst @@ -79,6 +79,13 @@ Install the AMICI dependencies via ``pacman`` sudo pacman -S python swig openblas gcc hdf5 boost-libs +Export the bash variables ``BLAS_CFLAGS`` and ``BLAS_LIBS`` to point to where BLAS was installed, e.g.: + +.. code-block:: bash + + export BLAS_CFLAGS="-I/usr/include/openblas/" + export BLAS_LIBS="-lopenblas" + Install AMICI: .. code-block:: bash @@ -99,7 +106,14 @@ Alternatively: sudo pacman -Su python swig openblas gcc hdf5 boost-libs -3. Install AMICI: +3. Export the bash variables ``BLAS_CFLAGS`` and ``BLAS_LIBS`` to point to where BLAS was installed, e.g.: + +.. code-block:: bash + + export BLAS_CFLAGS="-I/usr/include/openblas/" + export BLAS_LIBS="-lopenblas" + +4. Install AMICI: .. 
code-block:: bash diff --git a/documentation/python_modules.rst b/documentation/python_modules.rst index 5481865a7d..237a0a021f 100644 --- a/documentation/python_modules.rst +++ b/documentation/python_modules.rst @@ -11,6 +11,14 @@ AMICI Python API amici.sbml_import amici.pysb_import amici.bngl_import + amici.petab + amici.petab.import_helpers + amici.petab.parameter_mapping + amici.petab.petab_import + amici.petab.pysb_import + amici.petab.sbml_import + amici.petab.simulations + amici.petab.simulator amici.petab_import amici.petab_import_pysb amici.petab_objective diff --git a/documentation/recreate_reference_list.py b/documentation/recreate_reference_list.py index 034c884c4b..7750173ce1 100755 --- a/documentation/recreate_reference_list.py +++ b/documentation/recreate_reference_list.py @@ -20,7 +20,7 @@ def get_keys_by_year(bibfile): """Get bibtex entry keys as dict by year""" - with open(bibfile, "r") as f: + with open(bibfile) as f: db = biblib.bib.Parser().parse(f, log_fp=sys.stderr).get_entries() recoverer = biblib.messages.InputErrorRecoverer() by_year = {} diff --git a/documentation/rtd_requirements.txt b/documentation/rtd_requirements.txt index 64bc03e519..ae49f9dfbb 100644 --- a/documentation/rtd_requirements.txt +++ b/documentation/rtd_requirements.txt @@ -1,7 +1,7 @@ # NOTE: relative paths are expected to be relative to the repository root -sphinx<7 +sphinx mock>=5.0.2 -setuptools==67.7.2 +setuptools>=67.7.2 pysb>=1.11.0 matplotlib==3.7.1 nbsphinx==0.9.1 @@ -9,15 +9,15 @@ nbformat==5.8.0 recommonmark>=0.7.1 sphinx_rtd_theme>=1.2.0 petab[vis]>=0.2.0 -sphinx-autodoc-typehints==1.23.0 +sphinx-autodoc-typehints git+https://github.com/readthedocs/sphinx-hoverxref@main -ipython==8.13.2 -breathe==4.35.0 +ipython>=8.13.2 +breathe>=4.35.0 #exhale>=0.3.5 -e git+https://github.com/mithro/sphinx-contrib-mithro#egg=sphinx-contrib-exhale-multiproject&subdirectory=sphinx-contrib-exhale-multiproject -sphinxcontrib-matlabdomain<0.19.0 +sphinxcontrib-matlabdomain>=0.20.0 
sphinxcontrib-napoleon>=0.7 -pygments==2.15.1 +pygments>=2.15.1 Jinja2==3.1.2 git+https://github.com/readthedocs/readthedocs-sphinx-ext ipykernel diff --git a/documentation/rtd_requirements2.txt b/documentation/rtd_requirements2.txt index 5a39f8e683..2c307f0ea4 100644 --- a/documentation/rtd_requirements2.txt +++ b/documentation/rtd_requirements2.txt @@ -1 +1 @@ -exhale>=0.3.6 +-e git+https://github.com/svenevs/exhale.git@a1a8551321e246e3ab81f5456e04a8159804595b#egg=exhale diff --git a/include/amici/defines.h b/include/amici/defines.h index 068b8a9d54..f5ed1e62bb 100644 --- a/include/amici/defines.h +++ b/include/amici/defines.h @@ -6,7 +6,6 @@ #endif #include -#include /* Math constants in case _USE_MATH_DEFINES is not supported */ #if defined(_USE_MATH_DEFINES) diff --git a/include/amici/edata.h b/include/amici/edata.h index f8639ca2eb..5d613c6262 100644 --- a/include/amici/edata.h +++ b/include/amici/edata.h @@ -14,24 +14,24 @@ class ReturnData; /** * @brief ExpData carries all information about experimental or - * condition-specific data + * condition-specific data. */ class ExpData : public SimulationParameters { public: /** - * @brief default constructor + * @brief Default constructor. */ ExpData() = default; /** - * @brief Copy constructor, needs to be declared to be generated in - * swig + * @brief Copy constructor. */ + // needs to be declared to be wrapped by SWIG ExpData(ExpData const&) = default; /** - * @brief constructor that only initializes dimensions + * @brief Constructor that only initializes dimensions. * * @param nytrue Number of observables * @param nztrue Number of event outputs @@ -154,6 +154,16 @@ class ExpData : public SimulationParameters { /** * @brief Set output timepoints. * + * If the number of timepoint increases, this will grow the + * observation/sigma matrices and fill new entries with NaN. + * If the number of timepoints decreases, this will shrink the + * observation/sigma matrices. 
+ * + * Note that the mapping from timepoints to measurements will not be + * preserved. E.g., say there are measurements at t = 2, and this + * function is called with [1, 2], then the old measurements will belong to + * t = 1. + * * @param ts timepoints */ void setTimepoints(std::vector const& ts); @@ -225,7 +235,7 @@ class ExpData : public SimulationParameters { void setObservedDataStdDev(std::vector const& observedDataStdDev); /** - * @brief Set indentical standard deviation for all measurements. + * @brief Set identical standard deviation for all measurements. * * @param stdDev standard deviation (dimension: scalar) */ @@ -278,8 +288,7 @@ class ExpData : public SimulationParameters { realtype const* getObservedDataStdDevPtr(int it) const; /** - * @brief set function that copies observed event data from input to - * ExpData::observedEvents + * @brief Set observed event data. * * @param observedEvents observed data (dimension: nmaxevent x nztrue, * row-major) @@ -287,8 +296,7 @@ class ExpData : public SimulationParameters { void setObservedEvents(std::vector const& observedEvents); /** - * @brief set function that copies observed event data for specific event - * observable + * @brief Set observed event data for specific event observable. * * @param observedEvents observed data (dimension: nmaxevent) * @param iz observed event data index @@ -296,8 +304,7 @@ class ExpData : public SimulationParameters { void setObservedEvents(std::vector const& observedEvents, int iz); /** - * @brief get function that checks whether event data at specified indices - * has been set + * @brief Check whether event data at specified indices has been set. * * @param ie event index * @param iz event observable index @@ -306,25 +313,23 @@ class ExpData : public SimulationParameters { bool isSetObservedEvents(int ie, int iz) const; /** - * @brief get function that copies data from ExpData::mz to output + * @brief Get observed event data. 
* * @return observed event data */ std::vector const& getObservedEvents() const; /** - * @brief get function that returns a pointer to observed data at ieth - * occurrence + * @brief Get pointer to observed data at ie-th occurrence. * * @param ie event occurrence * - * @return pointer to observed event data at ieth occurrence + * @return pointer to observed event data at ie-th occurrence */ realtype const* getObservedEventsPtr(int ie) const; /** - * @brief set function that copies data from input to - * ExpData::observedEventsStdDev + * @brief Set standard deviation of observed event data. * * @param observedEventsStdDev standard deviation of observed event data */ @@ -332,16 +337,14 @@ class ExpData : public SimulationParameters { setObservedEventsStdDev(std::vector const& observedEventsStdDev); /** - * @brief set function that sets all ExpData::observedDataStdDev to the - * input value + * @brief Set standard deviation of observed event data. * * @param stdDev standard deviation (dimension: scalar) */ void setObservedEventsStdDev(realtype stdDev); /** - * @brief set function that copies standard deviation of observed data for - * specific observable + * @brief Set standard deviation of observed data for a specific observable. * * @param observedEventsStdDev standard deviation of observed data * (dimension: nmaxevent) @@ -352,8 +355,7 @@ class ExpData : public SimulationParameters { ); /** - * @brief set function that sets all standard deviation of a specific - * observable to the input value + * @brief Set all standard deviations of a specific event-observable. 
* * @param stdDev standard deviation (dimension: scalar) * @param iz observed data index @@ -361,8 +363,8 @@ class ExpData : public SimulationParameters { void setObservedEventsStdDev(realtype stdDev, int iz); /** - * @brief get function that checks whether standard deviation of even data - * at specified indices has been set + * @brief Check whether standard deviation of event data + * at specified indices has been set. * * @param ie event index * @param iz event observable index @@ -371,16 +373,15 @@ class ExpData : public SimulationParameters { bool isSetObservedEventsStdDev(int ie, int iz) const; /** - * @brief get function that copies data from ExpData::observedEventsStdDev - * to output + * @brief Get standard deviation of observed event data. * * @return standard deviation of observed event data */ std::vector const& getObservedEventsStdDev() const; /** - * @brief get function that returns a pointer to standard deviation of - * observed event data at ie-th occurrence + * @brief Get pointer to standard deviation of + * observed event data at ie-th occurrence. * * @param ie event occurrence * @@ -389,6 +390,13 @@ class ExpData : public SimulationParameters { */ realtype const* getObservedEventsStdDevPtr(int ie) const; + /** + * @brief Set all observations and their standard deviations to NaN. + * + * Useful, e.g., after calling ExpData::setTimepoints. + */ + void clear_observations(); + /** * @brief Arbitrary (not necessarily unique) identifier. 
*/ diff --git a/include/amici/exception.h b/include/amici/exception.h index 7ee91d511b..431be48cb9 100644 --- a/include/amici/exception.h +++ b/include/amici/exception.h @@ -62,29 +62,35 @@ class AmiException : public std::exception { }; /** - * @brief cvode exception handler class + * @brief CVODE exception handler class */ class CvodeException : public AmiException { public: /** * @brief Constructor - * @param error_code error code returned by cvode function - * @param function cvode function name + * @param error_code error code returned by CVODE function + * @param function CVODE function name + * @param extra Extra text to append to error message */ - CvodeException(int error_code, char const* function); + CvodeException( + int error_code, char const* function, char const* extra = nullptr + ); }; /** - * @brief ida exception handler class + * @brief IDA exception handler class */ class IDAException : public AmiException { public: /** * @brief Constructor - * @param error_code error code returned by ida function - * @param function ida function name + * @param error_code error code returned by IDA function + * @param function IDA function name + * @param extra Extra text to append to error message */ - IDAException(int error_code, char const* function); + IDAException( + int error_code, char const* function, char const* extra = nullptr + ); }; /** diff --git a/include/amici/forwardproblem.h b/include/amici/forwardproblem.h index dfe3bd8f22..8c500bf73c 100644 --- a/include/amici/forwardproblem.h +++ b/include/amici/forwardproblem.h @@ -7,7 +7,6 @@ #include "amici/vector.h" #include -#include #include #include @@ -197,7 +196,9 @@ class ForwardProblem { SimulationState const& getSimulationStateTimepoint(int it) const { if (model->getTimepoint(it) == initial_state_.t) return getInitialSimulationState(); - return timepoint_states_.find(model->getTimepoint(it))->second; + auto map_iter = timepoint_states_.find(model->getTimepoint(it)); + assert(map_iter != 
timepoint_states_.end()); + return map_iter->second; }; /** @@ -250,6 +251,21 @@ class ForwardProblem { void handleEvent(realtype* tlastroot, bool seflag, bool initial_event); + /** + * @brief Store pre-event model state + * + * @param seflag Secondary event flag + * @param initial_event initial event flag + */ + void store_pre_event_state(bool seflag, bool initial_event); + + /** + * @brief Check for, and if applicable, handle any secondary events + * + * @param tlastroot pointer to the timepoint of the last event + */ + void handle_secondary_event(realtype* tlastroot); + /** * @brief Extract output information for events */ @@ -258,9 +274,9 @@ class ForwardProblem { /** * @brief Execute everything necessary for the handling of data points * - * @param it index of data point + * @param t measurement timepoint */ - void handleDataPoint(int it); + void handleDataPoint(realtype t); /** * @brief Applies the event bolus to the current state @@ -353,7 +369,8 @@ class ForwardProblem { * @brief Array of flags indicating which root has been found. * * Array of length nr (ne) with the indices of the user functions gi found - * to have a root. For i = 0, . . . ,nr 1 if gi has a root, and = 0 if not. + * to have a root. For i = 0, . . . ,nr 1 or -1 if gi has a root, and = 0 + * if not. See CVodeGetRootInfo for details. 
*/ std::vector roots_found_; diff --git a/include/amici/logging.h b/include/amici/logging.h index 0118bedd28..6af039d4c4 100644 --- a/include/amici/logging.h +++ b/include/amici/logging.h @@ -83,7 +83,7 @@ struct LogItem { , message(message){}; /** Severity level */ - LogSeverity severity; + LogSeverity severity = LogSeverity::error; /** Short identifier for the logged event */ std::string identifier; diff --git a/include/amici/misc.h b/include/amici/misc.h index c18606e3e9..6dc3294240 100644 --- a/include/amici/misc.h +++ b/include/amici/misc.h @@ -9,7 +9,6 @@ #include #include #include -#include #include #include @@ -291,7 +290,11 @@ class CpuTimer { return d_milliseconds(clock::now() - start_).count(); } - static const bool uses_thread_clock = true; + /** + * @brief Whether the timer uses a thread clock (i.e. provides proper, + * thread-specific CPU time). + */ + static bool const uses_thread_clock = true; private: /** Start time */ @@ -330,7 +333,11 @@ class CpuTimer { / CLOCKS_PER_SEC; } - static const bool uses_thread_clock = false; + /** + * @brief Whether the timer uses a thread clock (i.e. provides proper, + * thread-specific CPU time). + */ + static bool const uses_thread_clock = false; private: /** Start time */ diff --git a/include/amici/model.h b/include/amici/model.h index 72f733e6cf..481164afdf 100644 --- a/include/amici/model.h +++ b/include/amici/model.h @@ -12,7 +12,6 @@ #include "amici/vector.h" #include -#include #include namespace amici { @@ -117,6 +116,8 @@ class Model : public AbstractModel, public ModelDimensions { * @param ndxdotdp_explicit Number of nonzero elements in `dxdotdp_explicit` * @param ndxdotdx_explicit Number of nonzero elements in `dxdotdx_explicit` * @param w_recursion_depth Recursion depth of fw + * @param state_independent_events Map of events with state-independent + * triggers functions, mapping trigger timepoints to event indices. 
*/ Model( ModelDimensions const& model_dimensions, @@ -124,7 +125,8 @@ class Model : public AbstractModel, public ModelDimensions { amici::SecondOrderMode o2mode, std::vector idlist, std::vector z2event, bool pythonGenerated = false, int ndxdotdp_explicit = 0, int ndxdotdx_explicit = 0, - int w_recursion_depth = 0 + int w_recursion_depth = 0, + std::map> state_independent_events = {} ); /** Destructor. */ @@ -702,6 +704,12 @@ class Model : public AbstractModel, public ModelDimensions { /** * @brief Set simulation start time. + * + * Output timepoints are absolute timepoints, independent of + * \f$ t_{0} \f$. + * For output timepoints \f$ t < t_{0} \f$, the initial state will be + * returned. + * @param t0 Simulation start time */ void setT0(double t0); @@ -1435,7 +1443,7 @@ class Model : public AbstractModel, public ModelDimensions { std::vector const& getReinitializationStateIdxs() const; /** Flag indicating Matlab- or Python-based model generation */ - bool pythonGenerated; + bool pythonGenerated = false; /** * @brief getter for dxdotdp (matlab generated) @@ -1449,6 +1457,15 @@ class Model : public AbstractModel, public ModelDimensions { */ SUNMatrixWrapper const& get_dxdotdp_full() const; + /** + * @brief Get trigger times for events that don't require root-finding. + * + * @return List of unique trigger points for events that don't require + * root-finding (i.e. that trigger at predetermined timepoints), + * in ascending order. + */ + virtual std::vector get_trigger_timepoints() const; + /** * Flag indicating whether for * `amici::Solver::sensi_` == `amici::SensitivityOrder::second` @@ -1462,6 +1479,12 @@ class Model : public AbstractModel, public ModelDimensions { /** Logger */ Logger* logger = nullptr; + /** + * @brief Map of trigger timepoints to event indices for events that don't + * require root-finding. 
+ */ + std::map> state_independent_events_ = {}; + protected: /** * @brief Write part of a slice to a buffer according to indices specified @@ -2003,7 +2026,11 @@ class Model : public AbstractModel, public ModelDimensions { * Indicates whether the result of every call to `Model::f*` should be * checked for finiteness */ +#ifdef NDEBUG bool always_check_finite_{false}; +#else + bool always_check_finite_{true}; +#endif /** indicates whether sigma residuals are to be added for every datapoint */ bool sigma_res_{false}; diff --git a/include/amici/model_dae.h b/include/amici/model_dae.h index dd16e74666..b35cfa1d70 100644 --- a/include/amici/model_dae.h +++ b/include/amici/model_dae.h @@ -10,7 +10,6 @@ #include #include -#include #include namespace amici { @@ -41,6 +40,8 @@ class Model_DAE : public Model { * @param ndxdotdp_explicit number of nonzero elements dxdotdp_explicit * @param ndxdotdx_explicit number of nonzero elements dxdotdx_explicit * @param w_recursion_depth Recursion depth of fw + * @param state_independent_events Map of events with state-independent + * triggers functions, mapping trigger timepoints to event indices. 
*/ Model_DAE( ModelDimensions const& model_dimensions, @@ -48,12 +49,13 @@ class Model_DAE : public Model { const SecondOrderMode o2mode, std::vector const& idlist, std::vector const& z2event, bool const pythonGenerated = false, int const ndxdotdp_explicit = 0, int const ndxdotdx_explicit = 0, - int const w_recursion_depth = 0 + int const w_recursion_depth = 0, + std::map> state_independent_events = {} ) : Model( model_dimensions, simulation_parameters, o2mode, idlist, z2event, pythonGenerated, ndxdotdp_explicit, ndxdotdx_explicit, - w_recursion_depth + w_recursion_depth, state_independent_events ) { derived_state_.M_ = SUNMatrixWrapper(nx_solver, nx_solver); auto M_nnz = static_cast( diff --git a/include/amici/model_dimensions.h b/include/amici/model_dimensions.h index f0679dbe36..b5aa1ba21e 100644 --- a/include/amici/model_dimensions.h +++ b/include/amici/model_dimensions.h @@ -31,6 +31,7 @@ struct ModelDimensions { * @param nz Number of event observables * @param nztrue Number of event observables of the non-augmented model * @param ne Number of events + * @param ne_solver Number of events that require root-finding * @param nspl Number of splines * @param nJ Number of objective functions * @param nw Number of repeating elements @@ -58,11 +59,12 @@ struct ModelDimensions { int const nx_rdata, int const nxtrue_rdata, int const nx_solver, int const nxtrue_solver, int const nx_solver_reinit, int const np, int const nk, int const ny, int const nytrue, int const nz, - int const nztrue, int const ne, int const nspl, int const nJ, - int const nw, int const ndwdx, int const ndwdp, int const ndwdw, - int const ndxdotdw, std::vector ndJydy, int const ndxrdatadxsolver, - int const ndxrdatadtcl, int const ndtotal_cldx_rdata, int const nnz, - int const ubw, int const lbw + int const nztrue, int const ne, int const ne_solver, int const nspl, + int const nJ, int const nw, int const ndwdx, int const ndwdp, + int const ndwdw, int const ndxdotdw, std::vector ndJydy, + int const 
ndxrdatadxsolver, int const ndxrdatadtcl, + int const ndtotal_cldx_rdata, int const nnz, int const ubw, + int const lbw ) : nx_rdata(nx_rdata) , nxtrue_rdata(nxtrue_rdata) @@ -76,6 +78,7 @@ struct ModelDimensions { , nz(nz) , nztrue(nztrue) , ne(ne) + , ne_solver(ne_solver) , nspl(nspl) , nw(nw) , ndwdx(ndwdx) @@ -104,6 +107,8 @@ struct ModelDimensions { Expects(nztrue >= 0); Expects(nztrue <= nz); Expects(ne >= 0); + Expects(ne_solver >= 0); + Expects(ne >= ne_solver); Expects(nspl >= 0); Expects(nw >= 0); Expects(ndwdx >= 0); @@ -164,7 +169,10 @@ struct ModelDimensions { /** Number of events */ int ne{0}; - /** numer of spline functions in the model */ + /** Number of events that require root-finding */ + int ne_solver{0}; + + /** Number of spline functions in the model */ int nspl{0}; /** Number of common expressions */ diff --git a/include/amici/model_ode.h b/include/amici/model_ode.h index 91e0c9cd45..e03e1867d1 100644 --- a/include/amici/model_ode.h +++ b/include/amici/model_ode.h @@ -10,7 +10,6 @@ #include #include -#include #include namespace amici { @@ -40,6 +39,8 @@ class Model_ODE : public Model { * @param ndxdotdp_explicit number of nonzero elements dxdotdp_explicit * @param ndxdotdx_explicit number of nonzero elements dxdotdx_explicit * @param w_recursion_depth Recursion depth of fw + * @param state_independent_events Map of events with state-independent + * triggers functions, mapping trigger timepoints to event indices. 
*/ Model_ODE( ModelDimensions const& model_dimensions, @@ -47,12 +48,13 @@ class Model_ODE : public Model { const SecondOrderMode o2mode, std::vector const& idlist, std::vector const& z2event, bool const pythonGenerated = false, int const ndxdotdp_explicit = 0, int const ndxdotdx_explicit = 0, - int const w_recursion_depth = 0 + int const w_recursion_depth = 0, + std::map> state_independent_events = {} ) : Model( model_dimensions, simulation_parameters, o2mode, idlist, z2event, pythonGenerated, ndxdotdp_explicit, ndxdotdx_explicit, - w_recursion_depth + w_recursion_depth, state_independent_events ) {} void diff --git a/include/amici/serialization.h b/include/amici/serialization.h index 7d0428f71f..c2a69d4b3a 100644 --- a/include/amici/serialization.h +++ b/include/amici/serialization.h @@ -6,10 +6,7 @@ #include "amici/solver.h" #include "amici/solver_cvodes.h" -#include #include -#include -#include #include #include @@ -18,6 +15,7 @@ #include #include #include +#include #include /** @file serialization.h Helper functions and forward declarations for @@ -35,7 +33,7 @@ template void archiveVector(Archive& ar, T** p, int size) { if (Archive::is_loading::value) { if (*p != nullptr) - delete[] * p; + delete[] *p; ar& size; *p = size ? 
new T[size] : nullptr; } else { @@ -146,6 +144,7 @@ void serialize(Archive& ar, amici::Model& m, unsigned int const /*version*/) { ar& m.sigma_res_; ar& m.steadystate_computation_mode_; ar& m.steadystate_sensitivity_mode_; + ar& m.state_independent_events_; } /** @@ -263,6 +262,7 @@ void serialize( ar& m.nz; ar& m.nztrue; ar& m.ne; + ar& m.ne_solver; ar& m.nspl; ar& m.nw; ar& m.ndwdx; diff --git a/include/amici/simulation_parameters.h b/include/amici/simulation_parameters.h index ca0e127c5c..8a1c1ec23d 100644 --- a/include/amici/simulation_parameters.h +++ b/include/amici/simulation_parameters.h @@ -35,6 +35,11 @@ class SimulationParameters { this->parameters.size(), ParameterScaling::none )) {} +#ifndef SWIGPYTHON + /* + * include/amici/simulation_parameters.h:71: Warning 509: Overloaded method amici::SimulationParameters::SimulationParameters(std::vector< amici::realtype,std::allocator< amici::realtype > >,std::vector< amici::realtype,std::allocator< amici::realtype > >,std::vector< amici::realtype,std::allocator< amici::realtype > >) effectively ignored, + * include/amici/simulation_parameters.h:54: Warning 509: as it is shadowed by amici::SimulationParameters::SimulationParameters(std::vector< amici::realtype,std::allocator< amici::realtype > >,std::vector< amici::realtype,std::allocator< amici::realtype > >,std::vector< int,std::allocator< int > >). + */ /** * @brief Constructor * @param fixedParameters Model constants @@ -69,6 +74,7 @@ class SimulationParameters { this->parameters.size(), ParameterScaling::none )) , ts_(std::move(timepoints)) {} +#endif /** * @brief Set reinitialization of all states based on model constants for @@ -169,7 +175,14 @@ class SimulationParameters { */ std::vector plist; - /** starting time */ + /** + * @brief Starting time of the simulation. + * + * Output timepoints are absolute timepoints, independent of + * \f$ t_{start} \f$. + * For output timepoints \f$ t < t_{start} \f$, the initial state will be + * returned. 
+ */ realtype tstart_{0.0}; /** diff --git a/include/amici/solver.h b/include/amici/solver.h index 120a963ba4..4a1c95b96a 100644 --- a/include/amici/solver.h +++ b/include/amici/solver.h @@ -48,7 +48,8 @@ class Solver { public: /** Type of what is passed to Sundials solvers as user_data */ using user_data_type = std::pair; - + /** Type of the function to free a raw sundials solver pointer */ + using free_solver_ptr = std::function; /** * @brief Default constructor */ @@ -1608,10 +1609,10 @@ class Solver { void applySensitivityTolerances() const; /** pointer to solver memory block */ - mutable std::unique_ptr> solver_memory_; + mutable std::unique_ptr solver_memory_; /** pointer to solver memory block */ - mutable std::vector>> + mutable std::vector> solver_memory_B_; /** Sundials user_data */ diff --git a/include/amici/spline.h b/include/amici/spline.h index 07a436e380..d6f6b24b01 100644 --- a/include/amici/spline.h +++ b/include/amici/spline.h @@ -1,6 +1,5 @@ #ifndef amici_spline_h #define amici_spline_h -#include namespace amici { diff --git a/include/amici/steadystateproblem.h b/include/amici/steadystateproblem.h index dc19c014c4..55c9aaca77 100644 --- a/include/amici/steadystateproblem.h +++ b/include/amici/steadystateproblem.h @@ -8,7 +8,6 @@ #include -#include #include namespace amici { diff --git a/matlab/@amimodel/generateC.m b/matlab/@amimodel/generateC.m index 59eb5e37b4..869f386baf 100644 --- a/matlab/@amimodel/generateC.m +++ b/matlab/@amimodel/generateC.m @@ -163,6 +163,7 @@ function generateC(this) fprintf(fid,[' ' num2str(this.nz) ',\n']); fprintf(fid,[' ' num2str(this.nztrue) ',\n']); fprintf(fid,[' ' num2str(this.nevent) ',\n']); +fprintf(fid,[' ' num2str(this.nevent) ',\n']); fprintf(fid,[' 0,\n']); fprintf(fid,[' ' num2str(this.ng) ',\n']); fprintf(fid,[' ' num2str(this.nw) ',\n']); diff --git a/models/model_calvetti/model_calvetti.h b/models/model_calvetti/model_calvetti.h index 828be82728..c8144bdf5e 100644 --- 
a/models/model_calvetti/model_calvetti.h +++ b/models/model_calvetti/model_calvetti.h @@ -45,6 +45,7 @@ class Model_model_calvetti : public amici::Model_DAE { 0, 0, 4, + 4, 0, 1, 38, @@ -207,6 +208,6 @@ class Model_model_calvetti : public amici::Model_DAE { } // namespace model_model_calvetti -} // namespace amici +} // namespace amici #endif /* _amici_model_calvetti_h */ diff --git a/models/model_dirac/model_dirac.h b/models/model_dirac/model_dirac.h index 7a762479b5..cfd943e456 100644 --- a/models/model_dirac/model_dirac.h +++ b/models/model_dirac/model_dirac.h @@ -45,6 +45,7 @@ class Model_model_dirac : public amici::Model_ODE { 0, 0, 2, + 2, 0, 1, 0, @@ -204,6 +205,6 @@ class Model_model_dirac : public amici::Model_ODE { } // namespace model_model_dirac -} // namespace amici +} // namespace amici #endif /* _amici_model_dirac_h */ diff --git a/models/model_events/model_events.h b/models/model_events/model_events.h index df4bb68ae7..ad6c976419 100644 --- a/models/model_events/model_events.h +++ b/models/model_events/model_events.h @@ -59,6 +59,7 @@ class Model_model_events : public amici::Model_ODE { 2, 2, 6, + 6, 0, 1, 0, @@ -232,6 +233,6 @@ class Model_model_events : public amici::Model_ODE { } // namespace model_model_events -} // namespace amici +} // namespace amici #endif /* _amici_model_events_h */ diff --git a/models/model_jakstat_adjoint/model_jakstat_adjoint.h b/models/model_jakstat_adjoint/model_jakstat_adjoint.h index fdac2a9f94..6d7601947a 100644 --- a/models/model_jakstat_adjoint/model_jakstat_adjoint.h +++ b/models/model_jakstat_adjoint/model_jakstat_adjoint.h @@ -49,6 +49,7 @@ class Model_model_jakstat_adjoint : public amici::Model_ODE { 0, 0, 0, + 0, 1, 2, 1, @@ -210,6 +211,6 @@ class Model_model_jakstat_adjoint : public amici::Model_ODE { } // namespace model_model_jakstat_adjoint -} // namespace amici +} // namespace amici #endif /* _amici_model_jakstat_adjoint_h */ diff --git a/models/model_jakstat_adjoint_o2/model_jakstat_adjoint_o2.h 
b/models/model_jakstat_adjoint_o2/model_jakstat_adjoint_o2.h index 22ca276067..bfac0b3267 100644 --- a/models/model_jakstat_adjoint_o2/model_jakstat_adjoint_o2.h +++ b/models/model_jakstat_adjoint_o2/model_jakstat_adjoint_o2.h @@ -49,6 +49,7 @@ class Model_model_jakstat_adjoint_o2 : public amici::Model_ODE { 0, 0, 0, + 0, 18, 10, 2, @@ -210,6 +211,6 @@ class Model_model_jakstat_adjoint_o2 : public amici::Model_ODE { } // namespace model_model_jakstat_adjoint_o2 -} // namespace amici +} // namespace amici #endif /* _amici_model_jakstat_adjoint_o2_h */ diff --git a/models/model_nested_events/model_nested_events.h b/models/model_nested_events/model_nested_events.h index 9ff8f519fe..0ed43eedbd 100644 --- a/models/model_nested_events/model_nested_events.h +++ b/models/model_nested_events/model_nested_events.h @@ -48,6 +48,7 @@ class Model_model_nested_events : public amici::Model_ODE { 0, 0, 4, + 4, 0, 1, 0, @@ -210,6 +211,6 @@ class Model_model_nested_events : public amici::Model_ODE { } // namespace model_model_nested_events -} // namespace amici +} // namespace amici #endif /* _amici_model_nested_events_h */ diff --git a/models/model_neuron/model_neuron.h b/models/model_neuron/model_neuron.h index e8f6f5c21f..e744f57f65 100644 --- a/models/model_neuron/model_neuron.h +++ b/models/model_neuron/model_neuron.h @@ -62,6 +62,7 @@ class Model_model_neuron : public amici::Model_ODE { 1, 1, 1, + 1, 0, 1, 0, @@ -238,6 +239,6 @@ class Model_model_neuron : public amici::Model_ODE { } // namespace model_model_neuron -} // namespace amici +} // namespace amici #endif /* _amici_model_neuron_h */ diff --git a/models/model_neuron_o2/model_neuron_o2.h b/models/model_neuron_o2/model_neuron_o2.h index 23df2b9b33..a108e6284b 100644 --- a/models/model_neuron_o2/model_neuron_o2.h +++ b/models/model_neuron_o2/model_neuron_o2.h @@ -64,6 +64,7 @@ class Model_model_neuron_o2 : public amici::Model_ODE { 5, 1, 1, + 1, 0, 5, 2, @@ -242,6 +243,6 @@ class Model_model_neuron_o2 : public 
amici::Model_ODE { } // namespace model_model_neuron_o2 -} // namespace amici +} // namespace amici #endif /* _amici_model_neuron_o2_h */ diff --git a/models/model_robertson/model_robertson.h b/models/model_robertson/model_robertson.h index 7f4377d785..816dd2db32 100644 --- a/models/model_robertson/model_robertson.h +++ b/models/model_robertson/model_robertson.h @@ -47,6 +47,7 @@ class Model_model_robertson : public amici::Model_DAE { 0, 0, 0, + 0, 1, 1, 2, @@ -209,6 +210,6 @@ class Model_model_robertson : public amici::Model_DAE { } // namespace model_model_robertson -} // namespace amici +} // namespace amici #endif /* _amici_model_robertson_h */ diff --git a/models/model_steadystate/model_steadystate.h b/models/model_steadystate/model_steadystate.h index b61649f9c8..776b754b08 100644 --- a/models/model_steadystate/model_steadystate.h +++ b/models/model_steadystate/model_steadystate.h @@ -46,6 +46,7 @@ class Model_model_steadystate : public amici::Model_ODE { 0, 0, 0, + 0, 1, 2, 2, @@ -204,6 +205,6 @@ class Model_model_steadystate : public amici::Model_ODE { } // namespace model_model_steadystate -} // namespace amici +} // namespace amici #endif /* _amici_model_steadystate_h */ diff --git a/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb b/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb index 5f66ea4db9..eb343dca09 100644 --- a/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb +++ b/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb @@ -62,7 +62,7 @@ "from IPython.display import Image\n", "\n", "fig = Image(\n", - " filename=(\"../../../documentation/gfx/steadystate_solver_workflow.png\")\n", + " filename=\"../../../documentation/gfx/steadystate_solver_workflow.png\"\n", ")\n", "fig" ] @@ -97,12 +97,9 @@ ], "source": [ "import libsbml\n", - "import importlib\n", "import amici\n", "import os\n", - "import sys\n", "import numpy as np\n", - "import matplotlib.pyplot as 
plt\n", "\n", "# SBML model we want to import\n", "sbml_file = \"model_constant_species.xml\"\n", @@ -397,7 +394,7 @@ " * `-5`: Error: The model was simulated past the timepoint `t=1e100` without finding a steady state. Therefore, it is likely that the model has not steady state for the given parameter vector.\n", "\n", "Here, only the second entry of `posteq_status` contains a positive integer: The first run of Newton's method failed due to a Jacobian, which oculd not be factorized, but the second run (simulation) contains the entry 1 (success). The third entry is 0, thus Newton's method was not launched for a second time.\n", - "More information can be found in`posteq_numsteps`: Also here, only the second entry contains a positive integer, which is smaller than the maximum number of steps taken (<1000). Hence steady state was reached via simulation, which corresponds to the simulated time written to `posteq_time`.\n", + "More information can be found in`posteq_numsteps`: Also here, only the second entry contains a positive integer, which is smaller than the maximum number of steps taken (<1000). 
Hence, steady state was reached via simulation, which corresponds to the simulated time written to `posteq_time`.\n", "\n", "We want to demonstrate a complete failure if inferring the steady state by reducing the number of integration steps to a lower value:" ] @@ -951,7 +948,7 @@ } ], "source": [ - "# Singluar Jacobian, use simulation\n", + "# Singular Jacobian, use simulation\n", "model.setSteadyStateSensitivityMode(\n", " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n", ")\n", @@ -1207,7 +1204,7 @@ } ], "source": [ - "# Non-singular Jacobian, use simulaiton\n", + "# Non-singular Jacobian, use simulation\n", "model_reduced.setSteadyStateSensitivityMode(\n", " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n", ")\n", diff --git a/python/examples/example_errors.ipynb b/python/examples/example_errors.ipynb index 5e07803d96..2b35964d8b 100644 --- a/python/examples/example_errors.ipynb +++ b/python/examples/example_errors.ipynb @@ -19,15 +19,16 @@ "source": [ "%matplotlib inline\n", "import os\n", + "from contextlib import suppress\n", + "from pathlib import Path\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", "import amici\n", - "from amici.petab_import import import_petab_problem\n", - "from amici.petab_objective import simulate_petab, RDATAS, EDATAS\n", + "from amici.petab.petab_import import import_petab_problem\n", + "from amici.petab.simulations import simulate_petab, RDATAS, EDATAS\n", "from amici.plotting import plot_state_trajectories, plot_jacobian\n", - "import petab\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "from pathlib import Path\n", - "from contextlib import suppress\n", "\n", "try:\n", " import benchmark_models_petab\n", @@ -153,7 +154,7 @@ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n", - "print(\"Simulations finished succesfully.\")\n", + 
"print(\"Simulations finished successfully.\")\n", "print()\n", "\n", "\n", @@ -174,7 +175,7 @@ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n", - "print(\"Simulations finished succesfully.\")" + "print(\"Simulations finished successfully.\")" ] }, { @@ -339,7 +340,7 @@ "**What happened?**\n", "\n", "AMICI failed to integrate the forward problem. The problem occurred for only one simulation condition, `condition_step_00_3`. The issue occurred at $t = 429.232$, where the error test failed.\n", - "This means, the solver is unable to take a step of non-zero size without violating the choosen error tolerances." + "This means, the solver is unable to take a step of non-zero size without violating the chosen error tolerances." ] }, { @@ -400,7 +401,7 @@ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n", ")\n", "assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n", - "print(\"Simulations finished succesfully.\")" + "print(\"Simulations finished successfully.\")" ] }, { @@ -457,7 +458,7 @@ "source": [ "**What happened?**\n", "\n", - "The simulation failed because the initial step-size after an event or heaviside function was too small. The error occured during simulation of condition `model1_data1` after successful preequilibration (`model1_data2`)." + "The simulation failed because the initial step-size after an event or heaviside function was too small. The error occurred during simulation of condition `model1_data1` after successful preequilibration (`model1_data2`)." 
] }, { @@ -646,7 +647,7 @@ "id": "62d82971", "metadata": {}, "source": [ - "Considering that `n_par` occurrs as exponent, it's magnitude looks pretty high.\n", + "Considering that `n_par` occurs as exponent, it's magnitude looks pretty high.\n", "This term is very likely causing the problem - let's check:" ] }, @@ -909,7 +910,7 @@ "source": [ "**What happened?**\n", "\n", - "All given experimental conditions require pre-equilibration, i.e., finding a steady state. AMICI first tries to find a steady state using the Newton solver, if that fails, it tries simulating until steady state, if that also failes, it tries the Newton solver from the end of the simulation. In this case, all three failed. Neither Newton's method nor simulation yielded a steady state satisfying the required tolerances.\n", + "All given experimental conditions require pre-equilibration, i.e., finding a steady state. AMICI first tries to find a steady state using the Newton solver, if that fails, it tries simulating until steady state, if that also fails, it tries the Newton solver from the end of the simulation. In this case, all three failed. 
Neither Newton's method nor simulation yielded a steady state satisfying the required tolerances.\n", "\n", "This can also be seen in `ReturnDataView.preeq_status` (the three statuses corresponds to Newton \\#1, Simulation, Newton \\#2):" ] diff --git a/python/examples/example_jax/ExampleJax.ipynb b/python/examples/example_jax/ExampleJax.ipynb index efda5b458e..225bd13667 100644 --- a/python/examples/example_jax/ExampleJax.ipynb +++ b/python/examples/example_jax/ExampleJax.ipynb @@ -262,7 +262,7 @@ "metadata": {}, "outputs": [], "source": [ - "from amici.petab_import import import_petab_problem\n", + "from amici.petab.petab_import import import_petab_problem\n", "\n", "amici_model = import_petab_problem(\n", " petab_problem, force_compile=True, verbose=False\n", @@ -276,7 +276,7 @@ "source": [ "# JAX implementation\n", "\n", - "For full jax support, we would have to implement a new [primitive](https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html), which would require quite a bit of engineering, and in the end wouldn't add much benefit since AMICI can't run on GPUs. Instead will interface AMICI using the experimental jax module [host_callback](https://jax.readthedocs.io/en/latest/jax.experimental.host_callback.html)." + "For full jax support, we would have to implement a new [primitive](https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html), which would require quite a bit of engineering, and in the end wouldn't add much benefit since AMICI can't run on GPUs. Instead, will interface AMICI using the experimental jax module [host_callback](https://jax.readthedocs.io/en/latest/jax.experimental.host_callback.html)." 
] }, { @@ -294,7 +294,7 @@ "metadata": {}, "outputs": [], "source": [ - "from amici.petab_objective import simulate_petab\n", + "from amici.petab.simulations import simulate_petab\n", "import amici\n", "\n", "amici_solver = amici_model.getSolver()\n", @@ -341,7 +341,7 @@ "id": "98e819bd", "metadata": {}, "source": [ - "Now we can finally define the JAX function that runs amici simulation using the host callback. We add a `custom_jvp` decorater so that we can define a custom jacobian vector product function in the next step. More details about custom jacobian vector product functions can be found in the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html)" + "Now we can finally define the JAX function that runs amici simulation using the host callback. We add a `custom_jvp` decorator so that we can define a custom jacobian vector product function in the next step. More details about custom jacobian vector product functions can be found in the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html)" ] }, { diff --git a/python/examples/example_large_models/example_performance_optimization.ipynb b/python/examples/example_large_models/example_performance_optimization.ipynb index 31a9fc1729..82ca8d9dbb 100644 --- a/python/examples/example_large_models/example_performance_optimization.ipynb +++ b/python/examples/example_large_models/example_performance_optimization.ipynb @@ -9,9 +9,9 @@ "\n", "**Objective:** Give some hints to speed up import and simulation of larger models\n", "\n", - "This notebook gives some hints that may help to speed up import and simulation of (mostly) larger models. While some of these settings may also yield slight performance improvements for smaller models, other settings may make things slower. 
The impact may be highly model-dependent (number of states, number of parameters, rate expressions) or system-dependent and it's worthile doing some benchmarking.\n", + "This notebook gives some hints that may help to speed up import and simulation of (mostly) larger models. While some of these settings may also yield slight performance improvements for smaller models, other settings may make things slower. The impact may be highly model-dependent (number of states, number of parameters, rate expressions) or system-dependent, and it's worthwhile doing some benchmarking.\n", "\n", - "To simulate models in AMICI, a model specified in a high-level format needs to be imported first, as shown in the following figure. This rougly involves the following steps:\n", + "To simulate models in AMICI, a model specified in a high-level format needs to be imported first, as shown in the following figure. This roughly involves the following steps:\n", "\n", "1. Generating the ODEs\n", "2. Computing derivatives\n", @@ -21,7 +21,7 @@ "\n", "![AMICI workflow](https://raw.githubusercontent.com/AMICI-dev/AMICI/master/documentation/gfx/amici_workflow.png)\n", "\n", - "There are various options to speed up individual steps of this process. Generally, faster import comes with slower simulation and vice versa. During parameter estimation, a model is often imported only once, and then millions of simulations are run. Therefore, faster simulation will easily compensate for slower import (one-off cost). In other cases, many models may to have to be imported, but only few simulations will be executed. In this case, faster import may bee more relevant.\n", + "There are various options to speed up individual steps of this process. Generally, faster import comes with slower simulation and vice versa. During parameter estimation, a model is often imported only once, and then millions of simulations are run. Therefore, faster simulation will easily compensate for slower import (one-off cost). 
In other cases, many models may to have to be imported, but only few simulations will be executed. In this case, faster import may be more relevant.\n", "\n", "In the following, we will present various settings that (may) influence import and simulation time. We will follow the order of steps outlined above.\n", "\n", @@ -35,7 +35,7 @@ "metadata": {}, "outputs": [], "source": [ - "from IPython.core.pylabtools import figsize, getfigs\n", + "from IPython.core.pylabtools import figsize\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "\n", @@ -78,7 +78,7 @@ "See also the following section for the case that no sensitivities are required at all.\n", "\n", "\n", - "#### Not generating sensivitiy code\n", + "#### Not generating sensitivity code\n", "\n", "If only forward simulations of a model are required, a modest import speedup can be obtained from not generating sensitivity code. This can be enabled via the `generate_sensitivity_code` argument of [amici.sbml_import.SbmlImporter.sbml2amici](https://amici.readthedocs.io/en/latest/generated/amici.sbml_import.SbmlImporter.html#amici.sbml_import.SbmlImporter.sbml2amici) or [amici.pysb_import.pysb2amici](https://amici.readthedocs.io/en/latest/generated/amici.pysb_import.html?highlight=pysb2amici#amici.pysb_import.pysb2amici).\n", "\n", @@ -160,7 +160,7 @@ "source": [ "#### Parallelization\n", "\n", - "For large models or complex model expressions, symbolic computation of the derivatives can be quite time consuming. This can be parallelized by setting the environment variable `AMICI_IMPORT_NPROCS` to the number of parallel processes that should be used. The impact strongly depends on the model. Note that setting this value too high may have a negative performance impact (benchmark!).\n", + "For large models or complex model expressions, symbolic computation of the derivatives can be quite time-consuming. 
This can be parallelized by setting the environment variable `AMICI_IMPORT_NPROCS` to the number of parallel processes that should be used. The impact strongly depends on the model. Note that setting this value too high may have a negative performance impact (benchmark!).\n", "\n", "Impact for a large and a tiny model:" ] @@ -241,7 +241,7 @@ "\n", "Simplification of model expressions can be disabled by passing `simplify=None` to [amici.sbml_import.SbmlImporter.sbml2amici](https://amici.readthedocs.io/en/latest/generated/amici.sbml_import.SbmlImporter.html#amici.sbml_import.SbmlImporter.sbml2amici) or [amici.pysb_import.pysb2amici](https://amici.readthedocs.io/en/latest/generated/amici.pysb_import.html?highlight=pysb2amici#amici.pysb_import.pysb2amici).\n", "\n", - "Depending on the given model, different simplification schemes may be cheaper or more beneficial than the default. SymPy's simplifcation functions are [well documentated](https://docs.sympy.org/latest/modules/simplify/simplify.html)." + "Depending on the given model, different simplification schemes may be cheaper or more beneficial than the default. SymPy's simplification functions are [well documented](https://docs.sympy.org/latest/modules/simplify/simplify.html)." ] }, { @@ -384,11 +384,11 @@ "source": [ "#### Compiler flags\n", "\n", - "For most compilers, different machine code optimizations can be enabled/disabled by the `-O0`, `-O1`, `-O2`, `-O3` flags, where a higher number enables more optimizations. For fastet simulation, `-O3` should be used. However, these optimizations come at the cost of increased compile times. If models grow very large, some optimizations (especially with `g++`, see above) become prohibitively slow. In this case, a lower optimization level may be necessary to be able to compile models at all.\n", + "For most compilers, different machine code optimizations can be enabled/disabled by the `-O0`, `-O1`, `-O2`, `-O3` flags, where a higher number enables more optimizations. 
For faster simulation, `-O3` should be used. However, these optimizations come at the cost of increased compile times. If models grow very large, some optimizations (especially with `g++`, see above) become prohibitively slow. In this case, a lower optimization level may be necessary to be able to compile models at all.\n", "\n", - "Another potential performance gain can be obtained from using CPU-specific instructions using `-march=native`. The disadvantage is, that the compiled model extension will only run on CPUs supporting the same instruction set. This may be become problematic when attempting to use an AMICI model on a machine other than on which it was compiled (e.g. on hetergenous compute clusters).\n", + "Another potential performance gain can be obtained from using CPU-specific instructions using `-march=native`. The disadvantage is that the compiled model extension will only run on CPUs supporting the same instruction set. This may become problematic when attempting to use an AMICI model on a machine other than the one on which it was compiled (e.g. on heterogeneous compute clusters).\n", "\n", - "These compiler flags should be set for both, AMICI installation installation and model compilation. \n", + "These compiler flags should be set for both AMICI installation and model compilation. \n", "\n", "For AMICI installation, e.g.,\n", "```bash\n", @@ -475,7 +475,7 @@ "source": [ "#### Using some optimized BLAS\n", "\n", - "You might have access to some custom [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) optimized for your hardware which might speed up your simulations somewhat. We are not aware of any systematic evaluation and cannot make any recomendation. You pass the respective compiler and linker flags via the environment variables `BLAS_CFLAGS` and `BLAS_LIBS`, respectively." 
+ "You might have access to some custom [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) optimized for your hardware which might speed up your simulations somewhat. We are not aware of any systematic evaluation and cannot make any recommendation. You pass the respective compiler and linker flags via the environment variables `BLAS_CFLAGS` and `BLAS_LIBS`, respectively." ] }, { @@ -487,7 +487,7 @@ "\n", "A major determinant of simulation time for a given model is the required accuracy and the selected solvers. This has been evaluated, for example, in https://doi.org/10.1038/s41598-021-82196-2 and is not covered further here. \n", "\n", - "### Adjoint *vs.* forward sensivities\n", + "### Adjoint *vs.* forward sensitivities\n", "\n", "If only the objective function gradient is required, adjoint sensitivity analysis are often preferable over forward sensitivity analysis. As a rule of thumb, adjoint sensitivity analysis seems to outperform forward sensitivity analysis for models with more than 20 parameters:\n", "\n", diff --git a/python/examples/example_petab/petab.ipynb b/python/examples/example_petab/petab.ipynb index 689d793f56..afc4b2a38e 100644 --- a/python/examples/example_petab/petab.ipynb +++ b/python/examples/example_petab/petab.ipynb @@ -6,304 +6,90 @@ "source": [ "# Using PEtab\n", "\n", - "This notebook illustrates how to use [PEtab](https://github.com/petab-dev/petab) with AMICI." + "This notebook illustrates how to run model simulations based on [PEtab](https://github.com/petab-dev/petab) problems with AMICI.\n", + "\n", + "PEtab is a format for specifying parameter estimation problems in systems biology. It is based on [SBML](http://sbml.org/) and [TSV](https://en.wikipedia.org/wiki/Tab-separated_values) files. (AMICI also supports PySB-based PEtab problems, that will be covered by PEtab v2). 
The Python package [pyPESTO](https://pypesto.readthedocs.io/) provides a convenient interface for parameter estimation with PEtab problems and uses AMICI as a backend. However, AMICI can also be used directly to simulate PEtab problems. This is illustrated in this notebook." ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from amici.petab_import import import_petab_problem\n", - "from amici.petab_objective import simulate_petab\n", "import petab\n", "\n", - "import os" + "from amici import runAmiciSimulation\n", + "from amici.petab.petab_import import import_petab_problem\n", + "from amici.petab.petab_problem import PetabProblem\n", + "from amici.petab.simulations import simulate_petab\n", + "from amici.plotting import plot_state_trajectories" ] }, { "cell_type": "markdown", - "metadata": {}, - "source": [ - "We use an example model from the [benchmark collection](https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab):" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Cloning into 'tmp/benchmark-models'...\n", - "remote: Enumerating objects: 142, done.\u001b[K\n", - "remote: Counting objects: 100% (142/142), done.\u001b[K\n", - "remote: Compressing objects: 100% (122/122), done.\u001b[K\n", - "remote: Total 142 (delta 41), reused 104 (delta 18), pack-reused 0\u001b[K\n", - "Receiving objects: 100% (142/142), 648.29 KiB | 1.23 MiB/s, done.\n", - "Resolving deltas: 100% (41/41), done.\n" - ] - } - ], "source": [ - "!git clone --depth 1 https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git tmp/benchmark-models || (cd tmp/benchmark-models && git pull)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "total 68\r\n", - "drwxr-xr-x 2 yannik yannik 4096 
Mär 17 15:27 Alkan_SciSignal2018\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Beer_MolBioSystems2014\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Boehm_JProteomeRes2014\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Borghans_BiophysChem1997\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Brannmark_JBC2010\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Bruno_JExpBio2016\r\n", - "-rwxr-xr-x 1 yannik yannik 654 Mär 17 15:27 checkBenchmarkModels.py\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Chen_MSB2009\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Crauste_CellSystems2017\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Elowitz_Nature2000\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Fiedler_BMC2016\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Fujita_SciSignal2010\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Perelson_Science1996\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Rahman_MBS2016\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Sneyd_PNAS2002\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Weber_BMC2015\r\n", - "drwxr-xr-x 2 yannik yannik 4096 Mär 17 15:27 Zheng_PNAS2012\r\n" - ] - } + "## Importing a PEtab problem" ], - "source": [ - "folder_base = \"tmp/benchmark-models/Benchmark-Models/\"\n", - "!ls -l $folder_base" - ] + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We import a model to PEtab from a provided yaml file:" + "We use the [Boehm_JProteomeRes2014](https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/master/Benchmark-Models/Boehm_JProteomeRes2014) example model from the [benchmark collection](https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab):" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model_name = \"Boehm_JProteomeRes2014\"\n", - "yaml_file = os.path.join(folder_base, 
model_name, model_name + \".yaml\")\n", - "petab_problem = petab.Problem.from_yaml(yaml_file)" + "model_name = \"Boehm_JProteomeRes2014\"\n", + "# local path or URL to the yaml file for the PEtab problem\n", + "petab_yaml = f\"https://raw.githubusercontent.com/Benchmarking-Initiative/Benchmark-Models-PEtab/master/Benchmark-Models/{model_name}/{model_name}.yaml\"\n", + "# load the problem using the PEtab library\n", + "petab_problem = petab.Problem.from_yaml(petab_yaml)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we import the model to amici, compile it and obtain a function handle:" + "\n", + "Next, we import the model to amici using `import_petab_problem`. `import_petab_problem` has many options to choose between faster import or more flexible or faster model simulations. We import the model with default settings, and we obtain an AMICI model instance:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2020-03-17 15:27:27.586 - amici.petab_import - INFO - Importing model ...\n", - "2020-03-17 15:27:27.593 - amici.petab_import - INFO - Model name is 'Boehm_JProteomeRes2014'. 
Writing model code to '/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014'.\n", - "2020-03-17 15:27:27.598 - amici.petab_import - INFO - Species: 8\n", - "2020-03-17 15:27:27.599 - amici.petab_import - INFO - Global parameters: 9\n", - "2020-03-17 15:27:27.599 - amici.petab_import - INFO - Reactions: 9\n", - "2020-03-17 15:27:27.715 - amici.petab_import - INFO - Observables: 3\n", - "2020-03-17 15:27:27.715 - amici.petab_import - INFO - Sigmas: 3\n", - "2020-03-17 15:27:27.722 - amici.petab_import - DEBUG - Adding output parameters to model: OrderedDict([('noiseParameter1_pSTAT5A_rel', None), ('noiseParameter1_pSTAT5B_rel', None), ('noiseParameter1_rSTAT5A_rel', None)])\n", - "2020-03-17 15:27:27.725 - amici.petab_import - DEBUG - Condition table: (1, 1)\n", - "2020-03-17 15:27:27.726 - amici.petab_import - DEBUG - Fixed parameters are []\n", - "2020-03-17 15:27:27.728 - amici.petab_import - INFO - Overall fixed parameters: 0\n", - "2020-03-17 15:27:27.729 - amici.petab_import - INFO - Variable parameters: 12\n", - "2020-03-17 15:27:27.735 - amici.sbml_import - INFO - Finished processing SBML parameters (1.25E-03s)\n", - "2020-03-17 15:27:27.749 - amici.sbml_import - INFO - Finished processing SBML species (1.26E-02s)\n", - "2020-03-17 15:27:27.829 - amici.sbml_import - INFO - Finished processing SBML reactions (7.41E-02s)\n", - "2020-03-17 15:27:27.833 - amici.sbml_import - INFO - Finished processing SBML compartments (4.23E-04s)\n", - "2020-03-17 15:27:27.898 - amici.sbml_import - INFO - Finished processing SBML rules (6.47E-02s)\n", - "2020-03-17 15:27:28.012 - amici.sbml_import - INFO - Finished processing SBML observables (6.77E-02s)\n", - "2020-03-17 15:27:28.139 - amici.ode_export - INFO - Finished writing J.cpp (1.14E-01s)\n", - "2020-03-17 15:27:28.160 - amici.ode_export - INFO - Finished writing JB.cpp (2.04E-02s)\n", - "2020-03-17 15:27:28.167 - amici.ode_export - INFO - Finished writing JDiag.cpp (6.41E-03s)\n", - "2020-03-17 
15:27:28.187 - amici.ode_export - INFO - Finished writing JSparse.cpp (1.91E-02s)\n", - "2020-03-17 15:27:28.217 - amici.ode_export - INFO - Finished writing JSparseB.cpp (2.73E-02s)\n", - "2020-03-17 15:27:28.236 - amici.ode_export - INFO - Finished writing Jy.cpp (1.65E-02s)\n", - "2020-03-17 15:27:28.344 - amici.ode_export - INFO - Finished writing dJydsigmay.cpp (1.07E-01s)\n", - "2020-03-17 15:27:28.389 - amici.ode_export - INFO - Finished writing dJydy.cpp (3.99E-02s)\n", - "2020-03-17 15:27:28.466 - amici.ode_export - INFO - Finished writing dwdp.cpp (7.61E-02s)\n", - "2020-03-17 15:27:28.473 - amici.ode_export - INFO - Finished writing dwdx.cpp (5.87E-03s)\n", - "2020-03-17 15:27:28.497 - amici.ode_export - INFO - Finished writing dxdotdw.cpp (2.32E-02s)\n", - "2020-03-17 15:27:28.533 - amici.ode_export - INFO - Finished writing dxdotdp_explicit.cpp (3.38E-02s)\n", - "2020-03-17 15:27:28.756 - amici.ode_export - INFO - Finished writing dydx.cpp (1.98E-01s)\n", - "2020-03-17 15:27:28.910 - amici.ode_export - INFO - Finished writing dydp.cpp (1.53E-01s)\n", - "2020-03-17 15:27:28.926 - amici.ode_export - INFO - Finished writing dsigmaydp.cpp (1.40E-02s)\n", - "2020-03-17 15:27:28.931 - amici.ode_export - INFO - Finished writing sigmay.cpp (2.46E-03s)\n", - "2020-03-17 15:27:28.950 - amici.ode_export - INFO - Finished writing w.cpp (1.55E-02s)\n", - "2020-03-17 15:27:28.967 - amici.ode_export - INFO - Finished writing x0.cpp (1.57E-02s)\n", - "2020-03-17 15:27:28.975 - amici.ode_export - INFO - Finished writing x0_fixedParameters.cpp (4.78E-03s)\n", - "2020-03-17 15:27:29.027 - amici.ode_export - INFO - Finished writing sx0.cpp (5.01E-02s)\n", - "2020-03-17 15:27:29.069 - amici.ode_export - INFO - Finished writing sx0_fixedParameters.cpp (3.14E-02s)\n", - "2020-03-17 15:27:29.104 - amici.ode_export - INFO - Finished writing xdot.cpp (3.43E-02s)\n", - "2020-03-17 15:27:29.129 - amici.ode_export - INFO - Finished writing y.cpp (2.16E-02s)\n", - "2020-03-17 
15:27:29.136 - amici.ode_export - INFO - Finished writing x_rdata.cpp (4.95E-03s)\n", - "2020-03-17 15:27:29.138 - amici.ode_export - INFO - Finished writing total_cl.cpp (6.59E-04s)\n", - "2020-03-17 15:27:29.147 - amici.ode_export - INFO - Finished writing x_solver.cpp (7.72E-03s)\n", - "2020-03-17 15:27:29.166 - amici.ode_export - INFO - Finished generating cpp code (1.14E+00s)\n", - "2020-03-17 15:27:46.200 - amici.ode_export - INFO - Finished compiling cpp code (1.70E+01s)\n", - "2020-03-17 15:27:46.204 - amici.petab_import - INFO - Finished Importing PEtab model (1.86E+01s)\n", - "2020-03-17 15:27:46.209 - amici.petab_import - INFO - Successfully loaded model Boehm_JProteomeRes2014 from /home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "running build_ext\n", - "building 'Boehm_JProteomeRes2014._Boehm_JProteomeRes2014' extension\n", - "swigging swig/Boehm_JProteomeRes2014.i to swig/Boehm_JProteomeRes2014_wrap.cpp\n", - "swig -python -c++ -modern -outdir Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/swig -I/home/yannik/amici/python/sdist/amici/include -o swig/Boehm_JProteomeRes2014_wrap.cpp swig/Boehm_JProteomeRes2014.i\n", - "creating build\n", - "creating build/temp.linux-x86_64-3.7\n", - "creating build/temp.linux-x86_64-3.7/swig\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c swig/Boehm_JProteomeRes2014_wrap.cpp -o 
build/temp.linux-x86_64-3.7/swig/Boehm_JProteomeRes2014_wrap.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dxdotdw.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdw.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_total_cl.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_total_cl.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include 
-I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_x_rdata.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x_rdata.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dxdotdp_implicit_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_implicit_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dsigmaydp.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dsigmaydp.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for 
C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_y.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_y.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dydp.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dydp.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial 
-I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_w.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_w.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JSparseB_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparseB_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dxdotdw_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdw_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC 
-I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dwdx_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdx_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_x0.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x0.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dwdx.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdx.o -std=c++14\n", 
- "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dJydy_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydy_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JSparseB.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparseB.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl 
-I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JSparseB_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparseB_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dxdotdp_explicit_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_explicit_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_sx0_fixedParameters.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_sx0_fixedParameters.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for 
C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JSparse_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparse_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dxdotdp_explicit.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_explicit.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include 
-I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dJydy.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydy.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dwdp_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdp_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_x0_fixedParameters.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x0_fixedParameters.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g 
-fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dxdotdw_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdw_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dJydsigmay.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydsigmay.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c 
Boehm_JProteomeRes2014_dxdotdp_implicit_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_implicit_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dwdp.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdp.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_sx0.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_sx0.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 
-I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JB.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JB.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dwdx_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdx_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c wrapfunctions.cpp -o build/temp.linux-x86_64-3.7/wrapfunctions.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for 
C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_x_solver.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x_solver.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JSparse.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparse.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial 
-I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_xdot.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_xdot.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dJydy_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydy_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dwdp_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdp_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 
-I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JSparse_colptrs.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparse_colptrs.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_J.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_J.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dydx.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dydx.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is 
valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_JDiag.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JDiag.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_Jy.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_Jy.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include 
-I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_sigmay.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_sigmay.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "gcc -pthread -B /home/yannik/anaconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014 -I/home/yannik/amici/python/sdist/amici/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/gsl -I/home/yannik/amici/python/sdist/amici/ThirdParty/sundials/include -I/home/yannik/amici/python/sdist/amici/ThirdParty/SuiteSparse/include -I/usr/include/hdf5/serial -I/home/yannik/anaconda3/include/python3.7m -c Boehm_JProteomeRes2014_dxdotdp_explicit_rowvals.cpp -o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_explicit_rowvals.o -std=c++14\n", - "cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\n", - "g++ -pthread -shared -B /home/yannik/anaconda3/compiler_compat -L/home/yannik/anaconda3/lib -Wl,-rpath=/home/yannik/anaconda3/lib -Wl,--no-as-needed -Wl,--sysroot=/ build/temp.linux-x86_64-3.7/swig/Boehm_JProteomeRes2014_wrap.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdw.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_total_cl.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x_rdata.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_implicit_colptrs.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dsigmaydp.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_y.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dydp.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_w.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparseB_rowvals.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdw_rowvals.o 
build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdx_rowvals.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x0.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdx.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydy_colptrs.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparseB.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparseB_colptrs.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_explicit_colptrs.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_sx0_fixedParameters.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparse_rowvals.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_explicit.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydy.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdp_colptrs.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x0_fixedParameters.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdw_colptrs.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydsigmay.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_implicit_rowvals.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdp.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_sx0.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JB.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdx_colptrs.o build/temp.linux-x86_64-3.7/wrapfunctions.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_x_solver.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparse.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_xdot.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dJydy_rowvals.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dwdp_rowvals.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JSparse_colptrs.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_J.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dydx.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_JDiag.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_Jy.o 
build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_sigmay.o build/temp.linux-x86_64-3.7/Boehm_JProteomeRes2014_dxdotdp_explicit_rowvals.o -L/usr/lib/x86_64-linux-gnu/hdf5/serial -L/home/yannik/amici/python/sdist/amici/libs -lamici -lsundials -lsuitesparse -lcblas -lhdf5_hl_cpp -lhdf5_hl -lhdf5_cpp -lhdf5 -o /home/yannik/amici/python/examples/amici_models/Boehm_JProteomeRes2014/Boehm_JProteomeRes2014/_Boehm_JProteomeRes2014.cpython-37m-x86_64-linux-gnu.so\n", - "\n" - ] - } - ], + "outputs": [], "source": [ - "amici_model = import_petab_problem(petab_problem)" + "amici_model = import_petab_problem(petab_problem, verbose=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "That's it. Now, we can use the model to perform simulations. For more involved purposes, consider using the objective function provided by [pyPESTO](https://github.com/icb-dcm/pypesto). For simple simulations, a function `simulate_petab` is available:" + "That's it. Now, we can use the model to perform simulations.\n", + "\n", + "## Simulating a PEtab problem\n", + "\n", + "For simple simulations, a function `simulate_petab` is available. This function will simulate the model for all conditions specified in the PEtab problem and compute the objective value (and if requested, the gradient). `simulate_petab` is mostly useful for running individual simulations. If large numbers of model simulations are required, there are more efficient means. 
In particular, for parameter estimation, consider using the optimized objective function provided by [pyPESTO](https://github.com/icb-dcm/pypesto).\n", + "\n", + "We use the `simulate_petab` function to simulate the model at the nominal parameters (i.e., the parameters specified in the PEtab problem in the `nominalValue` column of the parameter table):" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'llh': -138.22199570334107,\n", - " 'sllh': None,\n", - " 'rdatas': []}" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "simulate_petab(petab_problem, amici_model)" ] @@ -312,31 +98,21 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This performs a simulation at the nominal parameters. Parameters can also be directly specified, both scaled and unscaled:" + " Parameters can also be directly specified, both scaled and unscaled:" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'llh': -138.22199570334107,\n", - " 'sllh': None,\n", - " 'rdatas': []}" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "parameters = {\n", " x_id: x_val\n", " for x_id, x_val in zip(petab_problem.x_ids, petab_problem.x_nominal_scaled)\n", + " # Fixed parameters cannot be changed in `simulate_petab`, unless we explicitly pass\n", + " # a `parameter_mapping` that was generated with `fill_fixed_parameters=False`\n", + " if x_id not in amici_model.getFixedParameterIds()\n", "}\n", "simulate_petab(\n", " petab_problem,\n", @@ -350,8 +126,60 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For further information, see the [documentation](https://amici.readthedocs.io/en/latest/)." 
+ "## Working with PEtab-defined simulation conditions\n", + "\n", + "`simulate_petab` is convenient for quickly simulating PEtab-based problems, but for certain applications it may be too inflexible.\n", + "For example, it is not easily possible to obtain model outputs for time points other than the measurement timepoints specified in the PEtab problem. In such a case, the `PetabProblem` class can be used to easily generate AMICI `ExpData` objects representing PEtab-defined simulation conditions:" ] + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "app = PetabProblem(petab_problem)\n", + "\n", + "# ExpData for all conditions:\n", + "app.get_edatas()\n", + "\n", + "# ExpData for a single condition:\n", + "edata = app.get_edata(\"model1_data1\")" + ], + "metadata": { + "collapsed": false + }, + "execution_count": null + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "rdata = runAmiciSimulation(amici_model, solver=amici_model.getSolver(), edata=edata)\n", + "rdata" + ], + "metadata": { + "collapsed": false + }, + "execution_count": null + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "plot_state_trajectories(rdata)" + ], + "metadata": { + "collapsed": false + }, + "execution_count": null + }, + { + "cell_type": "markdown", + "source": [ + "For further information, check out the [AMICI documentation](https://amici.readthedocs.io/en/latest/)." 
+ ], + "metadata": { + "collapsed": false + } } ], "metadata": { diff --git a/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb b/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb index 63fbc7a4ff..83f23273af 100644 --- a/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb +++ b/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb @@ -21,16 +21,12 @@ "# Directory to which the generated model code is written\n", "model_output_dir = model_name\n", "\n", + "from pprint import pprint\n", + "\n", "import libsbml\n", - "import amici\n", - "import amici.plotting\n", - "import os\n", - "import sys\n", - "import importlib\n", "import numpy as np\n", - "import pandas as pd\n", - "import matplotlib.pyplot as plt\n", - "from pprint import pprint" + "\n", + "import amici.plotting" ] }, { @@ -143,7 +139,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For this example we want specify the initial drug and kinase concentrations as experimental conditions. Accordingly we specify them as `fixedParameters`. The meaning of `fixedParameters` is defined in the [Glossary](https://amici.readthedocs.io/en/latest/glossary.html#term-fixed-parameters), which we display here for convenience." + "For this example we want to specify the initial drug and kinase concentrations as experimental conditions. Accordingly, we specify them as `fixedParameters`. The meaning of `fixedParameters` is defined in the [Glossary](https://amici.readthedocs.io/en/latest/glossary.html#term-fixed-parameters), which we display here for convenience." ] }, { @@ -369,7 +365,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The resulting trajectory is definitely not what one may expect. The problem is that the `DRUG_0` and `KIN_0` set initial conditions for species in the model. By default these initial conditions are only applied at the very beginning of the simulation, i.e., before the preequilibration. 
Accordingly, the `fixedParameters` that we specified do not have any effect. To fix this, we need to set the `reinitializeFixedParameterInitialStates` attribue to `True`, to spefify that AMICI reinitializes all states that have `fixedParameter`-dependent initial states." + "The resulting trajectory is definitely not what one may expect. The problem is that the `DRUG_0` and `KIN_0` set initial conditions for species in the model. By default, these initial conditions are only applied at the very beginning of the simulation, i.e., before the preequilibration. Accordingly, the `fixedParameters` that we specified do not have any effect. To fix this, we need to set the `reinitializeFixedParameterInitialStates` attribute to `True`, to specify that AMICI reinitializes all states that have `fixedParameter`-dependent initial states." ] }, { diff --git a/python/examples/example_splines/ExampleSplines.ipynb b/python/examples/example_splines/ExampleSplines.ipynb index d376ba91e5..0c237c6e6d 100644 --- a/python/examples/example_splines/ExampleSplines.ipynb +++ b/python/examples/example_splines/ExampleSplines.ipynb @@ -24,21 +24,20 @@ }, "outputs": [], "source": [ - "import sys\n", "import os\n", - "import libsbml\n", - "import amici\n", - "\n", - "import numpy as np\n", - "import sympy as sp\n", - "\n", - "from shutil import rmtree\n", + "import sys\n", "from importlib import import_module\n", - "from uuid import uuid1\n", + "from shutil import rmtree\n", "from tempfile import TemporaryDirectory\n", + "from uuid import uuid1\n", + "\n", "import matplotlib as mpl\n", + "import numpy as np\n", + "import sympy as sp\n", "from matplotlib import pyplot as plt\n", "\n", + "import amici\n", + "\n", "# Choose build directory\n", "BUILD_PATH = None # temporary folder\n", "# BUILD_PATH = 'build' # specified folder for debugging\n", @@ -458,8 +457,7 @@ "\t\t\t2\n", "\t\t\n", "\t\n", - "\n", - "\n" + "\n" ] } ], @@ -1137,7 +1135,6 @@ "outputs": [], "source": [ "import pandas as pd\n", - 
"import seaborn as sns\n", "import tempfile\n", "import time" ] @@ -1179,7 +1176,7 @@ "metadata": {}, "outputs": [], "source": [ - "# If running as a Github action, just do the minimal amount of work required to check whether the code is working\n", + "# If running as a GitHub action, just do the minimal amount of work required to check whether the code is working\n", "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n", " nruns = 1\n", " num_nodes = [4]\n", diff --git a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb index 8e3ee6db10..e8b75e49aa 100644 --- a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb +++ b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb @@ -47,23 +47,19 @@ }, "outputs": [], "source": [ - "import os\n", - "import math\n", - "import logging\n", - "import contextlib\n", - "import multiprocessing\n", "import copy\n", + "import logging\n", + "import os\n", "\n", + "import libsbml\n", "import numpy as np\n", - "import sympy as sp\n", "import pandas as pd\n", + "import petab\n", + "import pypesto.petab\n", + "import sympy as sp\n", "from matplotlib import pyplot as plt\n", "\n", - "import libsbml\n", - "import amici\n", - "import petab\n", - "import pypesto\n", - "import pypesto.petab" + "import amici" ] }, { @@ -103,13 +99,13 @@ }, "outputs": [], "source": [ - "# If running as a Github action, just do the minimal amount of work required to check whether the code is working\n", + "# If running as a GitHub action, just do the minimal amount of work required to check whether the code is working\n", "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n", - " n_starts = 15\n", + " n_starts = 25\n", " pypesto_optimizer = pypesto.optimize.FidesOptimizer(\n", " verbose=logging.WARNING, options=dict(maxiter=10)\n", " )\n", - " pypesto_engine = pypesto.engine.SingleCoreEngine()" + " pypesto_engine = 
pypesto.engine.MultiProcessEngine()" ] }, { diff --git a/python/examples/example_steadystate/ExampleSteadystate.ipynb b/python/examples/example_steadystate/ExampleSteadystate.ipynb index b57ed522aa..d9f6ae635d 100644 --- a/python/examples/example_steadystate/ExampleSteadystate.ipynb +++ b/python/examples/example_steadystate/ExampleSteadystate.ipynb @@ -129,7 +129,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this example, we want to specify fixed parameters, observables and a $\\sigma$ parameter. Unfortunately, the latter two are not part of the [SBML standard](http://sbml.org/). However, they can be provided to `amici.SbmlImporter.sbml2amici` as demonstrated in the following." + "In this example, we want to specify fixed parameters, observables and a $\\sigma$ parameter. Unfortunately, the latter two are not part of the [SBML standard](https://sbml.org/). However, they can be provided to `amici.SbmlImporter.sbml2amici` as demonstrated in the following." ] }, { diff --git a/python/sdist/amici/__init__.py b/python/sdist/amici/__init__.py index bcb7387fbf..b06ea6d0f3 100644 --- a/python/sdist/amici/__init__.py +++ b/python/sdist/amici/__init__.py @@ -6,7 +6,6 @@ models and turning them into C++ Python extensions. 
""" - import contextlib import importlib import os @@ -108,18 +107,21 @@ def _imported_from_setup() -> bool: # from .swig_wrappers hdf5_enabled = "readSolverSettingsFromHDF5" in dir() # These modules require the swig interface and other dependencies - from .numpy import ExpDataView, ReturnDataView + from .numpy import ExpDataView, ReturnDataView # noqa: F401 from .pandas import * from .swig_wrappers import * # These modules don't require the swig interface from typing import Protocol, runtime_checkable - from .de_export import DEExporter, DEModel - from .sbml_import import SbmlImporter, assignmentRules2observables + from .de_export import DEExporter, DEModel # noqa: F401 + from .sbml_import import ( # noqa: F401 + SbmlImporter, + assignmentRules2observables, + ) @runtime_checkable - class ModelModule(Protocol): + class ModelModule(Protocol): # noqa: F811 """Type of AMICI-generated model modules. To enable static type checking.""" @@ -132,6 +134,8 @@ def get_model(self) -> amici.Model: """Create a model instance.""" ... + AmiciModel = Union[amici.Model, amici.ModelPtr] + class add_path: """Context manager for temporarily changing PYTHONPATH""" diff --git a/python/sdist/amici/__init__.template.py b/python/sdist/amici/__init__.template.py index c37ac0f962..f5e49b03dd 100644 --- a/python/sdist/amici/__init__.template.py +++ b/python/sdist/amici/__init__.template.py @@ -15,7 +15,7 @@ "version currently installed." ) -from .TPL_MODELNAME import * -from .TPL_MODELNAME import getModel as get_model +from .TPL_MODELNAME import * # noqa: F403, F401 +from .TPL_MODELNAME import getModel as get_model # noqa: F401 __version__ = "TPL_PACKAGE_VERSION" diff --git a/python/sdist/amici/__main__.py b/python/sdist/amici/__main__.py index 165f5d9516..bf179cf871 100644 --- a/python/sdist/amici/__main__.py +++ b/python/sdist/amici/__main__.py @@ -1,6 +1,5 @@ """Package-level entrypoint""" -import os import sys from . 
import __version__, compiledWithOpenMP, has_clibs, hdf5_enabled diff --git a/python/sdist/amici/conserved_quantities_demartino.py b/python/sdist/amici/conserved_quantities_demartino.py index 5b04fa1479..4f2d326b54 100644 --- a/python/sdist/amici/conserved_quantities_demartino.py +++ b/python/sdist/amici/conserved_quantities_demartino.py @@ -2,7 +2,8 @@ import math import random import sys -from typing import List, MutableSequence, Optional, Sequence, Tuple, Union +from typing import Optional, Union +from collections.abc import MutableSequence, Sequence from .logging import get_logger @@ -22,7 +23,7 @@ def compute_moiety_conservation_laws( max_num_monte_carlo: int = 20, rng_seed: Union[None, bool, int] = False, species_names: Optional[Sequence[str]] = None, -) -> Tuple[List[List[int]], List[List[float]]]: +) -> tuple[list[list[int]], list[list[float]]]: """Compute moiety conservation laws. According to the algorithm proposed by De Martino et al. (2014) @@ -112,9 +113,9 @@ def compute_moiety_conservation_laws( def _output( int_kernel_dim: int, kernel_dim: int, - int_matched: List[int], - species_indices: List[List[int]], - species_coefficients: List[List[float]], + int_matched: list[int], + species_indices: list[list[int]], + species_coefficients: list[list[float]], species_names: Optional[Sequence[str]] = None, verbose: bool = False, log_level: int = logging.DEBUG, @@ -203,7 +204,7 @@ def _qsort( def _kernel( stoichiometric_list: Sequence[float], num_species: int, num_reactions: int -) -> Tuple[int, List[int], int, List[int], List[List[int]], List[List[float]]]: +) -> tuple[int, list[int], int, list[int], list[list[int]], list[list[float]]]: """ Kernel (left nullspace of :math:`S`) calculation by Gaussian elimination @@ -227,8 +228,8 @@ def _kernel( kernel dimension, MCLs, integer kernel dimension, integer MCLs and indices to species and reactions in the preceding order as a tuple """ - matrix: List[List[int]] = [[] for _ in range(num_species)] - matrix2: 
List[List[float]] = [[] for _ in range(num_species)] + matrix: list[list[int]] = [[] for _ in range(num_species)] + matrix2: list[list[float]] = [[] for _ in range(num_species)] i_reaction = 0 i_species = 0 for val in stoichiometric_list: @@ -243,7 +244,7 @@ def _kernel( matrix[i].append(num_reactions + i) matrix2[i].append(1) - order: List[int] = list(range(num_species)) + order: list[int] = list(range(num_species)) pivots = [ matrix[i][0] if len(matrix[i]) else _MAX for i in range(num_species) ] @@ -283,7 +284,7 @@ def _kernel( if pivots[order[j + 1]] == pivots[order[j]] != _MAX: k1 = order[j + 1] k2 = order[j] - column: List[float] = [0] * (num_species + num_reactions) + column: list[float] = [0] * (num_species + num_reactions) g = matrix2[k2][0] / matrix2[k1][0] for i in range(1, len(matrix[k1])): column[matrix[k1][i]] = matrix2[k1][i] * g @@ -369,7 +370,7 @@ def _fill( stoichiometric_list: Sequence[float], matched: Sequence[int], num_species: int, -) -> Tuple[List[List[int]], List[List[int]], List[int]]: +) -> tuple[list[list[int]], list[list[int]], list[int]]: """Construct interaction matrix Construct the interaction matrix out of the given stoichiometric matrix @@ -454,8 +455,8 @@ def _is_linearly_dependent( boolean indicating linear dependence (true) or not (false) """ K = int_kernel_dim + 1 - matrix: List[List[int]] = [[] for _ in range(K)] - matrix2: List[List[float]] = [[] for _ in range(K)] + matrix: list[list[int]] = [[] for _ in range(K)] + matrix2: list[list[float]] = [[] for _ in range(K)] # Populate matrices with species ids and coefficients for CLs for i in range(K - 1): for j in range(len(cls_species_idxs[i])): @@ -508,7 +509,7 @@ def _is_linearly_dependent( if pivots[order[j + 1]] == pivots[order[j]] != _MAX: k1 = order[j + 1] k2 = order[j] - column: List[float] = [0] * num_species + column: list[float] = [0] * num_species g = matrix2[k2][0] / matrix2[k1][0] for i in range(1, len(matrix[k1])): column[matrix[k1][i]] = matrix2[k1][i] * g @@ -540,7 
+541,7 @@ def _monte_carlo( initial_temperature: float = 1, cool_rate: float = 1e-3, max_iter: int = 10, -) -> Tuple[bool, int, Sequence[int]]: +) -> tuple[bool, int, Sequence[int]]: """MonteCarlo simulated annealing for finding integer MCLs Finding integer solutions for the MCLs by Monte Carlo, see step (b) in @@ -712,8 +713,8 @@ def _relax( (``False``) """ K = len(int_matched) - matrix: List[List[int]] = [[] for _ in range(K)] - matrix2: List[List[float]] = [[] for _ in range(K)] + matrix: list[list[int]] = [[] for _ in range(K)] + matrix2: list[list[float]] = [[] for _ in range(K)] i_reaction = 0 i_species = 0 for val in stoichiometric_list: @@ -767,7 +768,7 @@ def _relax( if pivots[order[j + 1]] == pivots[order[j]] != _MAX: k1 = order[j + 1] k2 = order[j] - column: List[float] = [0] * num_reactions + column: list[float] = [0] * num_reactions g = matrix2[k2][0] / matrix2[k1][0] for i in range(1, len(matrix[k1])): column[matrix[k1][i]] = matrix2[k1][i] * g @@ -807,7 +808,7 @@ def _relax( # subtract rows # matrix2[k] = matrix2[k] - matrix2[j] * matrix2[k][i] - row_k: List[float] = [0] * num_reactions + row_k: list[float] = [0] * num_reactions for a in range(len(matrix[k])): row_k[matrix[k][a]] = matrix2[k][a] for a in range(len(matrix[j])): @@ -955,7 +956,7 @@ def _reduce( k1 = order[i] for j in range(i + 1, K): k2 = order[j] - column: List[float] = [0] * num_species + column: list[float] = [0] * num_species for species_idx, coefficient in zip( cls_species_idxs[k1], cls_coefficients[k1] ): diff --git a/python/sdist/amici/conserved_quantities_rref.py b/python/sdist/amici/conserved_quantities_rref.py index 46028e94b0..b16053ab08 100644 --- a/python/sdist/amici/conserved_quantities_rref.py +++ b/python/sdist/amici/conserved_quantities_rref.py @@ -1,6 +1,6 @@ """Find conserved quantities deterministically""" -from typing import List, Literal, Optional, Union +from typing import Literal, Optional, Union import numpy as np @@ -67,7 +67,7 @@ def _round(mat): return mat 
-def pivots(mat: np.array) -> List[int]: +def pivots(mat: np.array) -> list[int]: """Get indices of pivot columns in ``mat``, assumed to be in reduced row echelon form""" pivot_cols = [] diff --git a/python/sdist/amici/cxxcodeprinter.py b/python/sdist/amici/cxxcodeprinter.py index e6e377b331..3fe5b8cd17 100644 --- a/python/sdist/amici/cxxcodeprinter.py +++ b/python/sdist/amici/cxxcodeprinter.py @@ -2,7 +2,8 @@ import itertools import os import re -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Optional +from collections.abc import Iterable import sympy as sp from sympy.codegen.rewriting import Optimization, optimize @@ -73,7 +74,7 @@ def _print_min_max(self, expr, cpp_fun: str, sympy_fun): ) if len(expr.args) == 1: return self._print(arg0) - return "%s%s(%s, %s)" % ( + return "{}{}({}, {})".format( self._ns, cpp_fun, self._print(arg0), @@ -92,7 +93,7 @@ def _print_Max(self, expr): def _get_sym_lines_array( self, equations: sp.Matrix, variable: str, indent_level: int - ) -> List[str]: + ) -> list[str]: """ Generate C++ code for assigning symbolic terms in symbols to C++ array `variable`. 
@@ -122,7 +123,7 @@ def _get_sym_lines_symbols( equations: sp.Matrix, variable: str, indent_level: int, - ) -> List[str]: + ) -> list[str]: """ Generate C++ code for where array elements are directly replaced with their corresponding macro symbol @@ -209,11 +210,11 @@ def format_line(symbol: sp.Symbol): def csc_matrix( self, matrix: sp.Matrix, - rownames: List[sp.Symbol], - colnames: List[sp.Symbol], + rownames: list[sp.Symbol], + colnames: list[sp.Symbol], identifier: Optional[int] = 0, pattern_only: Optional[bool] = False, - ) -> Tuple[List[int], List[int], sp.Matrix, List[str], sp.Matrix]: + ) -> tuple[list[int], list[int], sp.Matrix, list[str], sp.Matrix]: """ Generates the sparse symbolic identifiers, symbolic identifiers, sparse matrix, column pointers and row values for a symbolic @@ -298,12 +299,14 @@ def print_bool(expr) -> str: def get_switch_statement( condition: str, - cases: Dict[int, List[str]], + cases: dict[int, list[str]], indentation_level: Optional[int] = 0, indentation_step: Optional[str] = " " * 4, ): """ - Generate code for switch statement + Generate code for a C++ switch statement. + + Generate code for a C++ switch statement with a ``break`` after each case. 
:param condition: Condition for switch @@ -321,26 +324,39 @@ def get_switch_statement( :return: Code for switch expression as list of strings """ - lines = [] - if not cases: - return lines + return [] indent0 = indentation_level * indentation_step indent1 = (indentation_level + 1) * indentation_step indent2 = (indentation_level + 2) * indentation_step + + # try to find redundant statements and collapse those cases + # map statements to case expressions + cases_map: dict[tuple[str, ...], list[str]] = {} for expression, statements in cases.items(): if statements: - lines.extend( + statement_code = tuple( [ - f"{indent1}case {expression}:", *(f"{indent2}{statement}" for statement in statements), f"{indent2}break;", ] ) - - if lines: - lines.insert(0, f"{indent0}switch({condition}) {{") - lines.append(indent0 + "}") - - return lines + case_code = f"{indent1}case {expression}:" + + cases_map[statement_code] = cases_map.get(statement_code, []) + [ + case_code + ] + + if not cases_map: + return [] + + return [ + f"{indent0}switch({condition}) {{", + *( + code + for codes in cases_map.items() + for code in itertools.chain.from_iterable(reversed(codes)) + ), + indent0 + "}", + ] diff --git a/python/sdist/amici/de_export.py b/python/sdist/amici/de_export.py index b1fa02c421..4e7e0999f2 100644 --- a/python/sdist/amici/de_export.py +++ b/python/sdist/amici/de_export.py @@ -25,15 +25,11 @@ TYPE_CHECKING, Any, Callable, - Dict, - List, Literal, Optional, - Sequence, - Set, - Tuple, Union, ) +from collections.abc import Sequence import numpy as np import sympy as sp @@ -58,8 +54,8 @@ generate_flux_symbol, smart_subs_dict, strip_pysb, - symbol_with_assumptions, toposort_symbols, + unique_preserve_order, ) from .logging import get_logger, log_execution_time, set_log_level @@ -587,7 +583,7 @@ def smart_multiply( def smart_is_zero_matrix( - x: Union[sp.MutableDenseMatrix, sp.MutableSparseMatrix] + x: Union[sp.MutableDenseMatrix, sp.MutableSparseMatrix], ) -> bool: """A faster 
implementation of sympy's is_zero_matrix @@ -757,23 +753,23 @@ def __init__( Whether to cache calls to the simplify method. Can e.g. decrease import times for models with events. """ - self._differential_states: List[DifferentialState] = [] - self._algebraic_states: List[AlgebraicState] = [] - self._algebraic_equations: List[AlgebraicEquation] = [] - self._observables: List[Observable] = [] - self._event_observables: List[EventObservable] = [] - self._sigma_ys: List[SigmaY] = [] - self._sigma_zs: List[SigmaZ] = [] - self._parameters: List[Parameter] = [] - self._constants: List[Constant] = [] - self._log_likelihood_ys: List[LogLikelihoodY] = [] - self._log_likelihood_zs: List[LogLikelihoodZ] = [] - self._log_likelihood_rzs: List[LogLikelihoodRZ] = [] - self._expressions: List[Expression] = [] - self._conservation_laws: List[ConservationLaw] = [] - self._events: List[Event] = [] + self._differential_states: list[DifferentialState] = [] + self._algebraic_states: list[AlgebraicState] = [] + self._algebraic_equations: list[AlgebraicEquation] = [] + self._observables: list[Observable] = [] + self._event_observables: list[EventObservable] = [] + self._sigma_ys: list[SigmaY] = [] + self._sigma_zs: list[SigmaZ] = [] + self._parameters: list[Parameter] = [] + self._constants: list[Constant] = [] + self._log_likelihood_ys: list[LogLikelihoodY] = [] + self._log_likelihood_zs: list[LogLikelihoodZ] = [] + self._log_likelihood_rzs: list[LogLikelihoodRZ] = [] + self._expressions: list[Expression] = [] + self._conservation_laws: list[ConservationLaw] = [] + self._events: list[Event] = [] self.splines = [] - self._symboldim_funs: Dict[str, Callable[[], int]] = { + self._symboldim_funs: dict[str, Callable[[], int]] = { "sx": self.num_states_solver, "v": self.num_states_solver, "vB": self.num_states_solver, @@ -781,23 +777,23 @@ def __init__( "sigmay": self.num_obs, "sigmaz": self.num_eventobs, } - self._eqs: Dict[ + self._eqs: dict[ str, Union[ sp.Matrix, sp.SparseMatrix, - 
List[Union[sp.Matrix, sp.SparseMatrix]], + list[Union[sp.Matrix, sp.SparseMatrix]], ], ] = dict() - self._sparseeqs: Dict[str, Union[sp.Matrix, List[sp.Matrix]]] = dict() - self._vals: Dict[str, List[sp.Expr]] = dict() - self._names: Dict[str, List[str]] = dict() - self._syms: Dict[str, Union[sp.Matrix, List[sp.Matrix]]] = dict() - self._sparsesyms: Dict[str, Union[List[str], List[List[str]]]] = dict() - self._colptrs: Dict[str, Union[List[int], List[List[int]]]] = dict() - self._rowvals: Dict[str, Union[List[int], List[List[int]]]] = dict() - - self._equation_prototype: Dict[str, Callable] = { + self._sparseeqs: dict[str, Union[sp.Matrix, list[sp.Matrix]]] = dict() + self._vals: dict[str, list[sp.Expr]] = dict() + self._names: dict[str, list[str]] = dict() + self._syms: dict[str, Union[sp.Matrix, list[sp.Matrix]]] = dict() + self._sparsesyms: dict[str, Union[list[str], list[list[str]]]] = dict() + self._colptrs: dict[str, Union[list[int], list[list[int]]]] = dict() + self._rowvals: dict[str, Union[list[int], list[list[int]]]] = dict() + + self._equation_prototype: dict[str, Callable] = { "total_cl": self.conservation_laws, "x0": self.states, "y": self.observables, @@ -809,7 +805,7 @@ def __init__( "sigmay": self.sigma_ys, "sigmaz": self.sigma_zs, } - self._variable_prototype: Dict[str, Callable] = { + self._variable_prototype: dict[str, Callable] = { "tcl": self.conservation_laws, "x_rdata": self.states, "y": self.observables, @@ -821,12 +817,12 @@ def __init__( "sigmaz": self.sigma_zs, "h": self.events, } - self._value_prototype: Dict[str, Callable] = { + self._value_prototype: dict[str, Callable] = { "p": self.parameters, "k": self.constants, } - self._total_derivative_prototypes: Dict[ - str, Dict[str, Union[str, List[str]]] + self._total_derivative_prototypes: dict[ + str, dict[str, Union[str, list[str]]] ] = { "sroot": { "eq": "root", @@ -836,13 +832,13 @@ def __init__( }, } - self._lock_total_derivative: List[str] = list() + self._lock_total_derivative: 
list[str] = list() self._simplify: Callable = simplify if cache_simplify and simplify is not None: def cached_simplify( expr: sp.Expr, - _simplified: Dict[str, sp.Expr] = {}, + _simplified: dict[str, sp.Expr] = {}, _simplify: Callable = simplify, ) -> sp.Expr: """Speed up expression simplification with caching. @@ -878,59 +874,59 @@ def cached_simplify( for fun in CUSTOM_FUNCTIONS: self._code_printer.known_functions[fun["sympy"]] = fun["c++"] - def differential_states(self) -> List[DifferentialState]: + def differential_states(self) -> list[DifferentialState]: """Get all differential states.""" return self._differential_states - def algebraic_states(self) -> List[AlgebraicState]: + def algebraic_states(self) -> list[AlgebraicState]: """Get all algebraic states.""" return self._algebraic_states - def observables(self) -> List[Observable]: + def observables(self) -> list[Observable]: """Get all observables.""" return self._observables - def parameters(self) -> List[Parameter]: + def parameters(self) -> list[Parameter]: """Get all parameters.""" return self._parameters - def constants(self) -> List[Constant]: + def constants(self) -> list[Constant]: """Get all constants.""" return self._constants - def expressions(self) -> List[Expression]: + def expressions(self) -> list[Expression]: """Get all expressions.""" return self._expressions - def events(self) -> List[Event]: + def events(self) -> list[Event]: """Get all events.""" return self._events - def event_observables(self) -> List[EventObservable]: + def event_observables(self) -> list[EventObservable]: """Get all event observables.""" return self._event_observables - def sigma_ys(self) -> List[SigmaY]: + def sigma_ys(self) -> list[SigmaY]: """Get all observable sigmas.""" return self._sigma_ys - def sigma_zs(self) -> List[SigmaZ]: + def sigma_zs(self) -> list[SigmaZ]: """Get all event observable sigmas.""" return self._sigma_zs - def conservation_laws(self) -> List[ConservationLaw]: + def conservation_laws(self) -> 
list[ConservationLaw]: """Get all conservation laws.""" return self._conservation_laws - def log_likelihood_ys(self) -> List[LogLikelihoodY]: + def log_likelihood_ys(self) -> list[LogLikelihoodY]: """Get all observable log likelihoodss.""" return self._log_likelihood_ys - def log_likelihood_zs(self) -> List[LogLikelihoodZ]: + def log_likelihood_zs(self) -> list[LogLikelihoodZ]: """Get all event observable log likelihoods.""" return self._log_likelihood_zs - def log_likelihood_rzs(self) -> List[LogLikelihoodRZ]: + def log_likelihood_rzs(self) -> list[LogLikelihoodRZ]: """Get all event observable regularization log likelihoods.""" return self._log_likelihood_rzs @@ -938,7 +934,7 @@ def is_ode(self) -> bool: """Check if model is ODE model.""" return len(self._algebraic_equations) == 0 - def states(self) -> List[State]: + def states(self) -> list[State]: """Get all states.""" return self._differential_states + self._algebraic_states @@ -1265,7 +1261,7 @@ def add_conservation_law( self, state: sp.Symbol, total_abundance: sp.Symbol, - coefficients: Dict[sp.Symbol, sp.Expr], + coefficients: dict[sp.Symbol, sp.Expr], ) -> None: r""" Adds a new conservation law to the model. A conservation law is defined @@ -1331,7 +1327,7 @@ def add_conservation_law( self.add_component(cl) self._differential_states[ix].set_conservation_law(cl) - def get_observable_transformations(self) -> List[ObservableTransformation]: + def get_observable_transformations(self) -> list[ObservableTransformation]: """ List of observable transformations @@ -1425,13 +1421,24 @@ def num_expr(self) -> int: return len(self.sym("w")) def num_events(self) -> int: + """ + Total number of Events (those for which root-functions are added and those without). + + :return: + number of events + """ + return len(self.sym("h")) + + def num_events_solver(self) -> int: """ Number of Events. 
:return: number of event symbols (length of the root vector in AMICI) """ - return len(self.sym("h")) + return sum( + not event.triggers_at_fixed_timepoint() for event in self.events() + ) def sym(self, name: str) -> sp.Matrix: """ @@ -1449,7 +1456,7 @@ def sym(self, name: str) -> sp.Matrix: return self._syms[name] - def sparsesym(self, name: str, force_generate: bool = True) -> List[str]: + def sparsesym(self, name: str, force_generate: bool = True) -> list[str]: """ Returns (and constructs if necessary) the sparsified identifiers for a sparsified symbolic variable. @@ -1505,7 +1512,7 @@ def sparseeq(self, name) -> sp.Matrix: def colptrs( self, name: str - ) -> Union[List[sp.Number], List[List[sp.Number]]]: + ) -> Union[list[sp.Number], list[list[sp.Number]]]: """ Returns (and constructs if necessary) the column pointers for a sparsified symbolic variable. @@ -1524,7 +1531,7 @@ def colptrs( def rowvals( self, name: str - ) -> Union[List[sp.Number], List[List[sp.Number]]]: + ) -> Union[list[sp.Number], list[list[sp.Number]]]: """ Returns (and constructs if necessary) the row values for a sparsified symbolic variable. @@ -1541,7 +1548,7 @@ def rowvals( self._generate_sparse_symbol(name) return self._rowvals[name] - def val(self, name: str) -> List[sp.Number]: + def val(self, name: str) -> list[sp.Number]: """ Returns (and constructs if necessary) the numeric values of a symbolic entity @@ -1556,7 +1563,7 @@ def val(self, name: str) -> List[sp.Number]: self._generate_value(name) return self._vals[name] - def name(self, name: str) -> List[str]: + def name(self, name: str) -> list[str]: """ Returns (and constructs if necessary) the names of a symbolic variable @@ -1571,7 +1578,7 @@ def name(self, name: str) -> List[str]: self._generate_name(name) return self._names[name] - def free_symbols(self) -> Set[sp.Basic]: + def free_symbols(self) -> set[sp.Basic]: """ Returns list of free symbols that appear in RHS and initial conditions. 
@@ -1750,7 +1757,17 @@ def parse_events(self) -> None: # add roots of heaviside functions self.add_component(root) - def get_appearance_counts(self, idxs: List[int]) -> List[int]: + # re-order events - first those that require root tracking, then the others + self._events = list( + chain( + itertools.filterfalse( + Event.triggers_at_fixed_timepoint, self._events + ), + filter(Event.triggers_at_fixed_timepoint, self._events), + ) + ) + + def get_appearance_counts(self, idxs: list[int]) -> list[int]: """ Counts how often a state appears in the time derivative of another state and expressions for a subset of states @@ -2211,6 +2228,23 @@ def _compute_equation(self, name: str) -> None: else: raise ValueError(f"Unknown equation {name}") + if name in ("sigmay", "sigmaz"): + # check for states in sigma{y,z}, which is currently not supported + syms_x = self.sym("x") + syms_yz = self.sym(name.removeprefix("sigma")) + xs_in_sigma = {} + for sym_yz, eq_yz in zip(syms_yz, self._eqs[name]): + yz_free_syms = eq_yz.free_symbols + if tmp := {x for x in syms_x if x in yz_free_syms}: + xs_in_sigma[sym_yz] = tmp + if xs_in_sigma: + msg = ", ".join( + [f"{yz} depends on {xs}" for yz, xs in xs_in_sigma.items()] + ) + raise NotImplementedError( + f"State-dependent observables are not supported, but {msg}." + ) + if name == "root": # Events are processed after the model has been set up. 
# Equations are there, but symbols for roots must be added @@ -2237,7 +2271,7 @@ def _compute_equation(self, name: str) -> None: self._eqs[name], self._simplify ) - def sym_names(self) -> List[str]: + def sym_names(self) -> list[str]: """ Returns a list of names of generated symbolic variables @@ -2330,7 +2364,7 @@ def _total_derivative( self, name: str, eq: str, - chainvars: List[str], + chainvars: list[str], var: str, dydx_name: str = None, dxdz_name: str = None, @@ -2469,7 +2503,7 @@ def _multiplication( self._eqs[name] = sign * smart_multiply(xx, yy) def _equation_from_components( - self, name: str, components: List[ModelQuantity] + self, name: str, components: list[ModelQuantity] ) -> None: """ Generates the formulas of a symbolic variable from the attributes @@ -2482,7 +2516,7 @@ def _equation_from_components( """ self._eqs[name] = sp.Matrix([comp.get_val() for comp in components]) - def get_conservation_laws(self) -> List[Tuple[sp.Symbol, sp.Expr]]: + def get_conservation_laws(self) -> list[tuple[sp.Symbol, sp.Expr]]: """Returns a list of states with conservation law set :return: @@ -2559,7 +2593,7 @@ def state_has_conservation_law(self, ix: int) -> bool: """ return self.states()[ix].has_conservation_law() - def get_solver_indices(self) -> Dict[int, int]: + def get_solver_indices(self) -> dict[int, int]: """ Returns a mapping that maps rdata species indices to solver indices @@ -2633,7 +2667,7 @@ def _expr_is_time_dependent(self, expr: sp.Expr) -> bool: def _get_unique_root( self, root_found: sp.Expr, - roots: List[Event], + roots: list[Event], ) -> Union[sp.Symbol, None]: """ Collects roots of Heaviside functions and events and stores them in @@ -2671,7 +2705,7 @@ def _get_unique_root( def _collect_heaviside_roots( self, args: Sequence[sp.Expr], - ) -> List[sp.Expr]: + ) -> list[sp.Expr]: """ Recursively checks an expression for the occurrence of Heaviside functions and return all roots found @@ -2708,7 +2742,7 @@ def _collect_heaviside_roots( def 
_process_heavisides( self, dxdt: sp.Expr, - roots: List[Event], + roots: list[Event], ) -> sp.Expr: """ Parses the RHS of a state variable, checks for Heaviside functions, @@ -2724,16 +2758,12 @@ def _process_heavisides( :returns: dxdt with Heaviside functions replaced by amici helper variables """ - - # expanding the rhs will in general help to collect the same - # heaviside function - dt_expanded = dxdt.expand() # track all the old Heaviside expressions in tmp_roots_old # replace them later by the new expressions heavisides = [] # run through the expression tree and get the roots - tmp_roots_old = self._collect_heaviside_roots(dt_expanded.args) - for tmp_old in tmp_roots_old: + tmp_roots_old = self._collect_heaviside_roots(dxdt.args) + for tmp_old in unique_preserve_order(tmp_roots_old): # we want unique identifiers for the roots tmp_new = self._get_unique_root(tmp_old, roots) # `tmp_new` is None if the root is not time-dependent. @@ -2867,7 +2897,7 @@ def __init__( ) # To only generate a subset of functions, apply subselection here - self.functions: Dict[str, _FunctionInfo] = copy.deepcopy(functions) + self.functions: dict[str, _FunctionInfo] = copy.deepcopy(functions) self.allow_reinit_fixpar_initcond: bool = allow_reinit_fixpar_initcond self._build_hints = set() @@ -3045,7 +3075,7 @@ def _generate_m_code(self) -> None: with open(compile_script, "w") as fileout: fileout.write("\n".join(lines)) - def _get_index(self, name: str) -> Dict[sp.Symbol, int]: + def _get_index(self, name: str) -> dict[sp.Symbol, int]: """ Compute indices for a symbolic array. :param name: @@ -3252,7 +3282,7 @@ def _write_function_file(self, function: str) -> None: def _generate_function_index( self, function: str, indextype: Literal["colptrs", "rowvals"] - ) -> List[str]: + ) -> list[str]: """ Generate equations and C++ code for the function ``function``. 
@@ -3354,7 +3384,7 @@ def _generate_function_index( def _get_function_body( self, function: str, equations: sp.Matrix - ) -> List[str]: + ) -> list[str]: """ Generate C++ code for body of function ``function``. @@ -3642,6 +3672,7 @@ def _write_model_header_cpp(self) -> None: "NZ": self.model.num_eventobs(), "NZTRUE": self.model.num_eventobs(), "NEVENT": self.model.num_events(), + "NEVENT_SOLVER": self.model.num_events_solver(), "NOBJECTIVE": "1", "NSPL": len(self.model.splines), "NW": len(self.model.sym("w")), @@ -3736,12 +3767,11 @@ def _write_model_header_cpp(self) -> None: ) ), "Z2EVENT": ", ".join(map(str, self.model._z2event)), + "STATE_INDEPENDENT_EVENTS": self._get_state_independent_event_intializer(), "ID": ", ".join( - ( - str(float(isinstance(s, DifferentialState))) - for s in self.model.states() - if not s.has_conservation_law() - ) + str(float(isinstance(s, DifferentialState))) + for s in self.model.states() + if not s.has_conservation_law() ), } @@ -3871,6 +3901,27 @@ def _get_symbol_id_initializer_list(self, name: str) -> str: for idx, symbol in enumerate(self.model.sym(name)) ) + def _get_state_independent_event_intializer(self) -> str: + """Get initializer list for state independent events in amici::Model.""" + map_time_to_event_idx = {} + for event_idx, event in enumerate(self.model.events()): + if not event.triggers_at_fixed_timepoint(): + continue + trigger_time = float(event.get_trigger_time()) + try: + map_time_to_event_idx[trigger_time].append(event_idx) + except KeyError: + map_time_to_event_idx[trigger_time] = [event_idx] + + def vector_initializer(v): + """std::vector initializer list with elements from `v`""" + return f"{{{', '.join(map(str, v))}}}" + + return ", ".join( + f"{{{trigger_time}, {vector_initializer(event_idxs)}}}" + for trigger_time, event_idxs in map_time_to_event_idx.items() + ) + def _write_c_make_file(self): """Write CMake ``CMakeLists.txt`` file for this model.""" sources = "\n".join( @@ -3986,7 +4037,7 @@ class 
TemplateAmici(Template): def apply_template( source_file: Union[str, Path], target_file: Union[str, Path], - template_data: Dict[str, str], + template_data: dict[str, str], ) -> None: """ Load source file, apply template substitution as provided in diff --git a/python/sdist/amici/de_model.py b/python/sdist/amici/de_model.py index 77d9013ad2..c20509407a 100644 --- a/python/sdist/amici/de_model.py +++ b/python/sdist/amici/de_model.py @@ -1,13 +1,14 @@ """Objects for AMICI's internal differential equation model representation""" import abc import numbers -from typing import Dict, Optional, Set, SupportsFloat, Union +from typing import Optional, SupportsFloat, Union import sympy as sp from .import_utils import ( RESERVED_SYMBOLS, ObservableTransformation, + amici_time_symbol, cast_to_sym, generate_measurement_symbol, generate_regularization_symbol, @@ -137,7 +138,7 @@ def __init__( identifier: sp.Symbol, name: str, value: sp.Expr, - coefficients: Dict[sp.Symbol, sp.Expr], + coefficients: dict[sp.Symbol, sp.Expr], state_id: sp.Symbol, ): """ @@ -159,9 +160,9 @@ def __init__( identifier of the state that this conservation law replaces """ self._state_expr: sp.Symbol = identifier - (value - state_id) - self._coefficients: Dict[sp.Symbol, sp.Expr] = coefficients + self._coefficients: dict[sp.Symbol, sp.Expr] = coefficients self._ncoeff: sp.Expr = coefficients[state_id] - super(ConservationLaw, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) def get_ncoeff(self, state_id) -> Union[sp.Expr, int, float]: """ @@ -200,9 +201,7 @@ def __init__(self, identifier: str, value: sp.Expr): formula of the algebraic equation, solution is given by ``formula == 0`` """ - super(AlgebraicEquation, self).__init__( - sp.Symbol(identifier), identifier, value - ) + super().__init__(sp.Symbol(identifier), identifier, value) def get_free_symbols(self): return self._value.free_symbols @@ -271,7 +270,7 @@ def __init__(self, identifier: sp.Symbol, name: str, init: 
sp.Expr): :param init: initial value of the AlgebraicState """ - super(AlgebraicState, self).__init__(identifier, name, init) + super().__init__(identifier, name, init) def has_conservation_law(self): """ @@ -321,7 +320,7 @@ def __init__( :param dt: time derivative """ - super(DifferentialState, self).__init__(identifier, name, init) + super().__init__(identifier, name, init) self._dt = cast_to_sym(dt, "dt") self._conservation_law: Union[ConservationLaw, None] = None @@ -362,7 +361,7 @@ def get_dt(self) -> sp.Expr: """ return self._dt - def get_free_symbols(self) -> Set[sp.Basic]: + def get_free_symbols(self) -> set[sp.Basic]: """ Gets the set of free symbols in time derivative and initial conditions @@ -422,7 +421,7 @@ def __init__( observable transformation, only applies when evaluating objective function or residuals """ - super(Observable, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) self._measurement_symbol = measurement_symbol self._regularization_symbol = None self.trafo = transformation @@ -480,7 +479,7 @@ def __init__( :param event: Symbolic identifier of the corresponding event. """ - super(EventObservable, self).__init__( + super().__init__( identifier, name, value, measurement_symbol, transformation ) self._event: sp.Symbol = event @@ -519,7 +518,7 @@ def __init__(self, identifier: sp.Symbol, name: str, value: sp.Expr): raise RuntimeError( "This class is meant to be sub-classed, not used directly." 
) - super(Sigma, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) class SigmaY(Sigma): @@ -555,7 +554,7 @@ def __init__(self, identifier: sp.Symbol, name: str, value: sp.Expr): :param value: formula """ - super(Expression, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) class Parameter(ModelQuantity): @@ -580,7 +579,7 @@ def __init__( :param value: numeric value """ - super(Parameter, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) class Constant(ModelQuantity): @@ -604,7 +603,7 @@ def __init__( :param value: numeric value """ - super(Constant, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) class LogLikelihood(ModelQuantity): @@ -633,7 +632,7 @@ def __init__(self, identifier: sp.Symbol, name: str, value: sp.Expr): raise RuntimeError( "This class is meant to be sub-classed, not used directly." ) - super(LogLikelihood, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) class LogLikelihoodY(LogLikelihood): @@ -691,11 +690,14 @@ def __init__( initial boolean value of the trigger function at t0. If set to `False`, events may trigger at ``t==t0``, otherwise not. """ - super(Event, self).__init__(identifier, name, value) + super().__init__(identifier, name, value) # add the Event specific components self._state_update = state_update self._initial_value = initial_value + # expression(s) for the timepoint(s) at which the event triggers + self._t_root = sp.solve(self.get_val(), amici_time_symbol) + def get_initial_value(self) -> bool: """ Return the initial value for the root function. 
@@ -713,3 +715,20 @@ def __eq__(self, other): return self.get_val() == other.get_val() and ( self.get_initial_value() == other.get_initial_value() ) + + def triggers_at_fixed_timepoint(self) -> bool: + """Check whether the event triggers at a (single) fixed time-point.""" + if len(self._t_root) != 1: + return False + return self._t_root[0].is_Number + + def get_trigger_time(self) -> sp.Float: + """Get the time at which the event triggers. + + Only for events that trigger at a single fixed time-point. + """ + if not self.triggers_at_fixed_timepoint(): + raise NotImplementedError( + "This event does not trigger at a fixed timepoint." + ) + return self._t_root[0] diff --git a/python/sdist/amici/debugging/__init__.py b/python/sdist/amici/debugging/__init__.py new file mode 100644 index 0000000000..81663d17b9 --- /dev/null +++ b/python/sdist/amici/debugging/__init__.py @@ -0,0 +1,45 @@ +"""Functions for debugging AMICI simulation failures.""" +import amici +import numpy as np + + +def get_model_for_preeq(model: amici.Model, edata: amici.ExpData): + """Get a model set-up to simulate the preequilibration condition as + specified in `edata`. + + Useful for analyzing simulation errors during preequilibration. + Simulating the returned model will reproduce the behavior of + simulation-based preequilibration. + + Note that for models with events, the simulation results may differ. + During preequilibration, event-handling is disabled. However, when + simulating the returned model, event-handling will be enabled. + For events triggered at fixed timepoints, this can be avoided by setting + :meth:`t0 ` to a timepoints after the last trigger + timepoint. + + :param model: + The model for which *edata* was generated. + :param edata: + The experimental data object with a preequilibration condition + specified. + :return: + A copy of *model* with the same parameters, initial states, ... + as *amici_model* for the preequilibration condition. 
+ Output timepoints are set to ``[inf]`` and will have to be adjusted. + """ + model = model.clone() + model.setTimepoints([np.inf]) + model.setFixedParameters(edata.fixedParametersPreequilibration) + if edata.pscale: + model.setParameterScale(edata.pscale) + if edata.parameters: + model.setParameters(edata.parameters) + if edata.plist: + model.setParameterList(edata.plist) + model.setInitialStates(edata.x0) + # has to be set *after* parameter list/scale! + model.setInitialStateSensitivities(edata.sx0) + model.setT0(edata.tstart_) + + return model diff --git a/python/sdist/amici/gradient_check.py b/python/sdist/amici/gradient_check.py index 27e2d671d3..c5ddb03749 100644 --- a/python/sdist/amici/gradient_check.py +++ b/python/sdist/amici/gradient_check.py @@ -6,7 +6,8 @@ """ import copy -from typing import Callable, List, Optional, Sequence +from typing import Optional +from collections.abc import Sequence import numpy as np @@ -29,7 +30,7 @@ def check_finite_difference( solver: Solver, edata: ExpData, ip: int, - fields: List[str], + fields: list[str], atol: Optional[float] = 1e-4, rtol: Optional[float] = 1e-4, epsilon: Optional[float] = 1e-3, @@ -331,7 +332,7 @@ def _check_results( """ result = rdata[field] - if type(result) is float: + if type(result) is float: # noqa E721 result = np.array(result) _check_close( diff --git a/python/sdist/amici/import_utils.py b/python/sdist/amici/import_utils.py index 77a2add60b..63a160c1de 100644 --- a/python/sdist/amici/import_utils.py +++ b/python/sdist/amici/import_utils.py @@ -7,14 +7,11 @@ from typing import ( Any, Callable, - Dict, - Iterable, Optional, - Sequence, SupportsFloat, - Tuple, Union, ) +from collections.abc import Iterable, Sequence import sympy as sp from sympy.functions.elementary.piecewise import ExprCondPair @@ -33,7 +30,7 @@ class SBMLException(Exception): pass -SymbolDef = Dict[sp.Symbol, Union[Dict[str, sp.Expr], sp.Expr]] +SymbolDef = dict[sp.Symbol, Union[dict[str, sp.Expr], sp.Expr]] # Monkey-patch 
toposort CircularDependencyError to handle non-sortable objects, @@ -44,13 +41,13 @@ def __init__(self, data): # error messages. That's convenient for doctests. s = "Circular dependencies exist among these items: {{{}}}".format( ", ".join( - "{!r}:{!r}".format(key, value) + f"{key!r}:{value!r}" for key, value in sorted( {str(k): v for k, v in data.items()}.items() ) ) ) - super(CircularDependencyError, self).__init__(s) + super().__init__(s) self.data = data @@ -72,7 +69,7 @@ class ObservableTransformation(str, enum.Enum): def noise_distribution_to_observable_transformation( - noise_distribution: Union[str, Callable] + noise_distribution: Union[str, Callable], ) -> ObservableTransformation: """ Parse noise distribution string and extract observable transformation @@ -93,7 +90,7 @@ def noise_distribution_to_observable_transformation( def noise_distribution_to_cost_function( - noise_distribution: Union[str, Callable] + noise_distribution: Union[str, Callable], ) -> Callable[[str], str]: """ Parse noise distribution string to a cost function definition amici can @@ -423,8 +420,8 @@ def _parse_special_functions(sym: sp.Expr, toplevel: bool = True) -> sp.Expr: def _denest_piecewise( - args: Sequence[Union[sp.Expr, sp.logic.boolalg.Boolean, bool]] -) -> Tuple[Union[sp.Expr, sp.logic.boolalg.Boolean, bool]]: + args: Sequence[Union[sp.Expr, sp.logic.boolalg.Boolean, bool]], +) -> tuple[Union[sp.Expr, sp.logic.boolalg.Boolean, bool]]: """ Denest piecewise functions that contain piecewise as condition @@ -548,7 +545,7 @@ def _parse_heaviside_trigger(trigger: sp.Expr) -> sp.Expr: def grouper( iterable: Iterable, n: int, fillvalue: Any = None -) -> Iterable[Tuple[Any]]: +) -> Iterable[tuple[Any]]: """ Collect data into fixed-length chunks or blocks @@ -734,5 +731,20 @@ def strip_pysb(symbol: sp.Basic) -> sp.Basic: return symbol +def unique_preserve_order(seq: Sequence) -> list: + """Return a list of unique elements in Sequence, keeping only the first + occurrence of each 
element + + Parameters: + seq: Sequence to prune + + Returns: + List of unique elements in ``seq`` + """ + seen = set() + seen_add = seen.add + return [x for x in seq if not (x in seen or seen_add(x))] + + sbml_time_symbol = symbol_with_assumptions("time") amici_time_symbol = symbol_with_assumptions("t") diff --git a/python/sdist/amici/logging.py b/python/sdist/amici/logging.py index 2648fc5b28..df39c4a219 100644 --- a/python/sdist/amici/logging.py +++ b/python/sdist/amici/logging.py @@ -34,24 +34,24 @@ def _setup_logger( level: Optional[int] = logging.WARNING, console_output: Optional[bool] = True, file_output: Optional[bool] = False, - capture_warnings: Optional[bool] = True, + capture_warnings: Optional[bool] = False, ) -> logging.Logger: """ - Set up a new logging.Logger for AMICI logging + Set up a new :class:`logging.Logger` for AMICI logging. :param level: - Logging level, typically using a constant like logging.INFO or - logging.DEBUG + Logging level, typically using a constant like :obj:`logging.INFO` or + :obj:`logging.DEBUG` :param console_output: - Set up a default console log handler if True (default) + Set up a default console log handler if ``True`` (default) :param file_output: Supply a filename to copy all log output to that file, or - set to False to disable (default) + set to ``False`` to disable (default) :param capture_warnings: - Capture warnings from Python's warnings module if True (default) + Capture warnings from Python's warnings module if ``True`` :return: A :class:`logging.Logger` object for AMICI logging. 
Note that other @@ -81,7 +81,12 @@ def _setup_logger( log.setLevel(level) + py_warn_logger = logging.getLogger("py.warnings") + # Remove default logging handler + for handler in log.handlers: + if handler in py_warn_logger.handlers: + py_warn_logger.removeHandler(handler) log.handlers = [] log_fmt = logging.Formatter( @@ -105,7 +110,10 @@ def _setup_logger( log.debug("Python version: %s", platform.python_version()) log.debug("Hostname: %s", socket.getfqdn()) - logging.captureWarnings(capture_warnings) + if capture_warnings: + logging.captureWarnings(capture_warnings) + for handler in log.handlers: + py_warn_logger.addHandler(handler) return log diff --git a/python/sdist/amici/numpy.py b/python/sdist/amici/numpy.py index b84e52cc2b..fdf802147c 100644 --- a/python/sdist/amici/numpy.py +++ b/python/sdist/amici/numpy.py @@ -6,7 +6,9 @@ import collections import copy -from typing import Dict, Iterator, List, Literal, Union +import itertools +from typing import Literal, Union +from collections.abc import Iterator import amici import numpy as np @@ -32,8 +34,8 @@ class is memory efficient as copies of the underlying C++ objects is """ _swigptr = None - _field_names: List[str] = [] - _field_dimensions: Dict[str, List[int]] = dict() + _field_names: list[str] = [] + _field_dimensions: dict[str, list[int]] = dict() def __getitem__(self, item: str) -> Union[np.ndarray, float]: """ @@ -79,7 +81,10 @@ def __getattr__(self, item) -> Union[np.ndarray, float]: :returns: value """ - return self.__getitem__(item) + try: + return self.__getitem__(item) + except KeyError as e: + raise AttributeError(item) from e def __init__(self, swigptr): """ @@ -89,7 +94,7 @@ def __init__(self, swigptr): """ self._swigptr = swigptr self._cache = {} - super(SwigPtrView, self).__init__() + super().__init__() def __len__(self) -> int: """ @@ -164,6 +169,13 @@ def __eq__(self, other): return False return self._swigptr == other._swigptr + def __dir__(self): + return sorted( + set( + 
itertools.chain(dir(super()), self.__dict__, self._field_names) + ) + ) + class ReturnDataView(SwigPtrView): """ @@ -237,7 +249,7 @@ def __init__(self, rdata: Union[ReturnDataPtr, ReturnData]): if not isinstance(rdata, (ReturnDataPtr, ReturnData)): raise TypeError( f"Unsupported pointer {type(rdata)}, must be" - f"amici.ExpDataPtr!" + f"amici.ReturnDataPtr or amici.ReturnData!" ) self._field_dimensions = { "ts": [rdata.nt], @@ -288,7 +300,7 @@ def __init__(self, rdata: Union[ReturnDataPtr, ReturnData]): "numerrtestfailsB": [rdata.nt], "numnonlinsolvconvfailsB": [rdata.nt], } - super(ReturnDataView, self).__init__(rdata) + super().__init__(rdata) def __getitem__( self, item: str @@ -406,11 +418,11 @@ def __init__(self, edata: Union[ExpDataPtr, ExpData]): edata.observedDataStdDev = edata.getObservedDataStdDev() edata.observedEvents = edata.getObservedEvents() edata.observedEventsStdDev = edata.getObservedEventsStdDev() - super(ExpDataView, self).__init__(edata) + super().__init__(edata) def _field_as_numpy( - field_dimensions: Dict[str, List[int]], field: str, data: SwigPtrView + field_dimensions: dict[str, list[int]], field: str, data: SwigPtrView ) -> Union[np.ndarray, float, None]: """ Convert data object field to numpy array with dimensions according to diff --git a/python/sdist/amici/pandas.py b/python/sdist/amici/pandas.py index 8a2eb5049d..745cbfb767 100644 --- a/python/sdist/amici/pandas.py +++ b/python/sdist/amici/pandas.py @@ -7,7 +7,7 @@ import copy import math -from typing import Dict, List, Optional, SupportsFloat, Union +from typing import Optional, SupportsFloat, Union import amici import numpy as np @@ -25,17 +25,17 @@ ] ExpDatas = Union[ - List[amici.amici.ExpData], - List[amici.ExpDataPtr], + list[amici.amici.ExpData], + list[amici.ExpDataPtr], amici.amici.ExpData, amici.ExpDataPtr, ] -ReturnDatas = Union[List[amici.ReturnDataView], amici.ReturnDataView] +ReturnDatas = Union[list[amici.ReturnDataView], amici.ReturnDataView] AmiciModel = 
Union[amici.ModelPtr, amici.Model] -def _process_edata_list(edata_list: ExpDatas) -> List[amici.amici.ExpData]: +def _process_edata_list(edata_list: ExpDatas) -> list[amici.amici.ExpData]: """ Maps single instances of :class:`amici.amici.ExpData` to lists of :class:`amici.amici.ExpData` @@ -52,7 +52,7 @@ def _process_edata_list(edata_list: ExpDatas) -> List[amici.amici.ExpData]: return edata_list -def _process_rdata_list(rdata_list: ReturnDatas) -> List[amici.ReturnDataView]: +def _process_rdata_list(rdata_list: ReturnDatas) -> list[amici.ReturnDataView]: """ Maps single instances of :class:`amici.ReturnData` to lists of :class:`amici.ReturnData` @@ -359,11 +359,11 @@ def getResidualsAsDataFrame( def _fill_conditions_dict( - datadict: Dict[str, float], + datadict: dict[str, float], model: AmiciModel, edata: amici.amici.ExpData, by_id: bool, -) -> Dict[str, float]: +) -> dict[str, float]: """ Helper function that fills in condition parameters from model and edata. @@ -413,7 +413,7 @@ def _fill_conditions_dict( return datadict -def _get_extended_observable_cols(model: AmiciModel, by_id: bool) -> List[str]: +def _get_extended_observable_cols(model: AmiciModel, by_id: bool) -> list[str]: """ Construction helper for extended observable dataframe headers. @@ -446,7 +446,7 @@ def _get_extended_observable_cols(model: AmiciModel, by_id: bool) -> List[str]: ) -def _get_observable_cols(model: AmiciModel, by_id: bool) -> List[str]: +def _get_observable_cols(model: AmiciModel, by_id: bool) -> list[str]: """ Construction helper for observable dataframe headers. @@ -475,7 +475,7 @@ def _get_observable_cols(model: AmiciModel, by_id: bool) -> List[str]: ) -def _get_state_cols(model: AmiciModel, by_id: bool) -> List[str]: +def _get_state_cols(model: AmiciModel, by_id: bool) -> list[str]: """ Construction helper for state dataframe headers. 
@@ -504,7 +504,7 @@ def _get_state_cols(model: AmiciModel, by_id: bool) -> List[str]: ) -def _get_expression_cols(model: AmiciModel, by_id: bool) -> List[str]: +def _get_expression_cols(model: AmiciModel, by_id: bool) -> list[str]: """Construction helper for expression dataframe headers. :param model: @@ -534,7 +534,7 @@ def _get_expression_cols(model: AmiciModel, by_id: bool) -> List[str]: def _get_names_or_ids( model: AmiciModel, variable: str, by_id: bool -) -> List[str]: +) -> list[str]: """ Obtains a unique list of identifiers for the specified variable. First tries model.getVariableNames and then uses model.getVariableIds. @@ -592,10 +592,10 @@ def _get_names_or_ids( def _get_specialized_fixed_parameters( model: AmiciModel, - condition: Union[Dict[str, SupportsFloat], pd.Series], - overwrite: Union[Dict[str, SupportsFloat], pd.Series], + condition: Union[dict[str, SupportsFloat], pd.Series], + overwrite: Union[dict[str, SupportsFloat], pd.Series], by_id: bool, -) -> List[float]: +) -> list[float]: """ Copies values in condition and overwrites them according to key value pairs specified in overwrite. @@ -730,7 +730,7 @@ def constructEdataFromDataFrame( def getEdataFromDataFrame( model: AmiciModel, df: pd.DataFrame, by_id: Optional[bool] = False -) -> List[amici.amici.ExpData]: +) -> list[amici.amici.ExpData]: """ Constructs a ExpData instances according to the provided Model and DataFrame. diff --git a/python/sdist/amici/parameter_mapping.py b/python/sdist/amici/parameter_mapping.py index f1cf75a150..b39d54c87e 100644 --- a/python/sdist/amici/parameter_mapping.py +++ b/python/sdist/amici/parameter_mapping.py @@ -1,457 +1,45 @@ -""" -Parameter mapping ------------------ - -When performing parameter inference, often parameters need to be mapped from -simulation to estimation parameters, and parameters can differ between -conditions. This can be handled using the `ParameterMapping`. - -Note -~~~~ +"""Parameter mapping between AMICI and PEtab. 
-While the parameter mapping can be used directly with AMICI, it was developed -for usage together with PEtab, for which the whole workflow of generating -the mapping is automatized. +.. deprecated:: 0.21.0 + Use :mod:`amici.petab.parameter_mapping` instead. """ -from __future__ import annotations -import numbers +# some extra imports for backward-compatibility import warnings -from collections.abc import Sequence -from itertools import chain -from typing import Any, Dict, List, Set, Union - -import amici -import numpy as np -from petab.C import * # noqa: F403 - -SingleParameterMapping = Dict[str, Union[numbers.Number, str]] -SingleScaleMapping = Dict[str, str] -AmiciModel = Union[amici.Model, amici.ModelPtr] - - -class ParameterMappingForCondition: - """Parameter mapping for condition. - - Contains mappings for free parameters, fixed parameters, and fixed - preequilibration parameters, both for parameters and scales. - - In the scale mappings, for each simulation parameter the scale - on which the value is passed (and potentially gradients are to be - returned) is given. In the parameter mappings, for each simulation - parameter a corresponding optimization parameter (or a numeric value) - is given. - - If a mapping is not passed, the parameter mappings are assumed to be empty, - and if a scale mapping is not passed, all scales are set to linear. - - :param map_sim_var: - Mapping for free simulation parameters. - :param scale_map_sim_var: - Scales for free simulation parameters. - :param map_preeq_fix: - Mapping for fixed preequilibration parameters. - :param scale_map_preeq_fix: - Scales for fixed preequilibration parameters. - :param map_sim_fix: - Mapping for fixed simulation parameters. - :param scale_map_sim_fix: - Scales for fixed simulation parameters. 
- """ - - def __init__( - self, - map_sim_var: SingleParameterMapping = None, - scale_map_sim_var: SingleScaleMapping = None, - map_preeq_fix: SingleParameterMapping = None, - scale_map_preeq_fix: SingleScaleMapping = None, - map_sim_fix: SingleParameterMapping = None, - scale_map_sim_fix: SingleScaleMapping = None, - ): - if map_sim_var is None: - map_sim_var = {} - self.map_sim_var = map_sim_var - - if scale_map_sim_var is None: - scale_map_sim_var = {key: LIN for key in map_sim_var} - self.scale_map_sim_var = scale_map_sim_var - - if map_preeq_fix is None: - map_preeq_fix = {} - self.map_preeq_fix = map_preeq_fix - - if scale_map_preeq_fix is None: - scale_map_preeq_fix = {key: LIN for key in map_preeq_fix} - self.scale_map_preeq_fix = scale_map_preeq_fix - - if map_sim_fix is None: - map_sim_fix = {} - self.map_sim_fix = map_sim_fix - - if scale_map_sim_fix is None: - scale_map_sim_fix = {key: LIN for key in map_sim_fix} - self.scale_map_sim_fix = scale_map_sim_fix - - def __repr__(self): - return ( - f"{self.__class__.__name__}(" - f"map_sim_var={repr(self.map_sim_var)}," - f"scale_map_sim_var={repr(self.scale_map_sim_var)}," - f"map_preeq_fix={repr(self.map_preeq_fix)}," - f"scale_map_preeq_fix={repr(self.scale_map_preeq_fix)}," - f"map_sim_fix={repr(self.map_sim_fix)}," - f"scale_map_sim_fix={repr(self.scale_map_sim_fix)})" - ) - - @property - def free_symbols(self) -> Set[str]: - """Get IDs of all (symbolic) parameters present in this mapping""" - return { - p - for p in chain( - self.map_sim_var.values(), - self.map_preeq_fix.values(), - self.map_sim_fix.values(), - ) - if isinstance(p, str) - } - - -class ParameterMapping(Sequence): - r"""Parameter mapping for multiple conditions. - - This can be used like a list of :class:`ParameterMappingForCondition`\ s. - - :param parameter_mappings: - List of parameter mappings for specific conditions. 
- """ - - def __init__( - self, parameter_mappings: List[ParameterMappingForCondition] = None - ): - super().__init__() - if parameter_mappings is None: - parameter_mappings = [] - self.parameter_mappings = parameter_mappings - - def __iter__(self): - yield from self.parameter_mappings - - def __getitem__( - self, item - ) -> Union[ParameterMapping, ParameterMappingForCondition]: - result = self.parameter_mappings[item] - if isinstance(result, ParameterMappingForCondition): - return result - return ParameterMapping(result) - - def __len__(self): - return len(self.parameter_mappings) - - def append( - self, parameter_mapping_for_condition: ParameterMappingForCondition - ): - """Append a condition specific parameter mapping.""" - self.parameter_mappings.append(parameter_mapping_for_condition) - - def __repr__(self): - return f"{self.__class__.__name__}({repr(self.parameter_mappings)})" - - @property - def free_symbols(self) -> Set[str]: - """Get IDs of all (symbolic) parameters present in this mapping""" - return set.union(*(mapping.free_symbols for mapping in self)) - - -def fill_in_parameters( - edatas: List[amici.ExpData], - problem_parameters: Dict[str, numbers.Number], - scaled_parameters: bool, - parameter_mapping: ParameterMapping, - amici_model: AmiciModel, -) -> None: - """Fill fixed and dynamic parameters into the edatas (in-place). - - :param edatas: - List of experimental datas :class:`amici.amici.ExpData` with - everything except parameters filled. - :param problem_parameters: - Problem parameters as parameterId=>value dict. Only - parameters included here will be set. Remaining parameters will - be used as currently set in `amici_model`. - :param scaled_parameters: - If True, problem_parameters are assumed to be on the scale provided - in the parameter mapping. If False, they are assumed - to be in linear scale. - :param parameter_mapping: - Parameter mapping for all conditions. - :param amici_model: - AMICI model. 
- """ - if unused_parameters := ( - set(problem_parameters.keys()) - parameter_mapping.free_symbols - ): - warnings.warn( - "The following problem parameters were not used: " - + str(unused_parameters), - RuntimeWarning, - ) - - for edata, mapping_for_condition in zip(edatas, parameter_mapping): - fill_in_parameters_for_condition( - edata, - problem_parameters, - scaled_parameters, - mapping_for_condition, - amici_model, - ) - - -def fill_in_parameters_for_condition( - edata: amici.ExpData, - problem_parameters: Dict[str, numbers.Number], - scaled_parameters: bool, - parameter_mapping: ParameterMappingForCondition, - amici_model: AmiciModel, -) -> None: - """Fill fixed and dynamic parameters into the edata for condition - (in-place). - - :param edata: - Experimental data object to fill parameters into. - :param problem_parameters: - Problem parameters as parameterId=>value dict. Only - parameters included here will be set. Remaining parameters will - be used as already set in `amici_model` and `edata`. - :param scaled_parameters: - If True, problem_parameters are assumed to be on the scale provided - in the parameter mapping. If False, they - are assumed to be in linear scale. - :param parameter_mapping: - Parameter mapping for current condition. 
- :param amici_model: - AMICI model - """ - map_sim_var = parameter_mapping.map_sim_var - scale_map_sim_var = parameter_mapping.scale_map_sim_var - map_preeq_fix = parameter_mapping.map_preeq_fix - scale_map_preeq_fix = parameter_mapping.scale_map_preeq_fix - map_sim_fix = parameter_mapping.map_sim_fix - scale_map_sim_fix = parameter_mapping.scale_map_sim_fix - - # Parameter mapping may contain parameter_ids as values, these *must* - # be replaced - - def _get_par(model_par, value, mapping): - """Replace parameter IDs in mapping dicts by values from - problem_parameters where necessary""" - if isinstance(value, str): - try: - # estimated parameter - return problem_parameters[value] - except KeyError: - # condition table overrides must have been handled already, - # e.g. by the PEtab parameter mapping, but parameters from - # InitialAssignments may still be present. - if mapping[value] == model_par: - # prevent infinite recursion - raise - return _get_par(value, mapping[value], mapping) - if model_par in problem_parameters: - # user-provided - return problem_parameters[model_par] - # prevent nan-propagation in derivative - if np.isnan(value): - return 0.0 - # constant value - return value - - map_preeq_fix = { - key: _get_par(key, val, map_preeq_fix) - for key, val in map_preeq_fix.items() - } - map_sim_fix = { - key: _get_par(key, val, map_sim_fix) - for key, val in map_sim_fix.items() - } - map_sim_var = { - key: _get_par(key, val, dict(map_sim_fix, **map_sim_var)) - for key, val in map_sim_var.items() - } - - # If necessary, (un)scale parameters - if scaled_parameters: - unscale_parameters_dict(map_preeq_fix, scale_map_preeq_fix) - unscale_parameters_dict(map_sim_fix, scale_map_sim_fix) - if not scaled_parameters: - # We scale all parameters to the scale they are estimated on, and pass - # that information to amici via edata.{parameters,pscale}. - # The scaling is necessary to obtain correct derivatives. 
- scale_parameters_dict(map_sim_var, scale_map_sim_var) - # We can skip preequilibration parameters, because they are identical - # with simulation parameters, and only the latter are used from here - # on. - - ########################################################################## - # variable parameters and parameter scale - - # parameter list from mapping dict - parameters = [ - map_sim_var[par_id] for par_id in amici_model.getParameterIds() - ] - - # scales list from mapping dict - scales = [ - petab_to_amici_scale(scale_map_sim_var[par_id]) - for par_id in amici_model.getParameterIds() - ] - - # plist - plist = [ - ip - for ip, par_id in enumerate(amici_model.getParameterIds()) - if isinstance(parameter_mapping.map_sim_var[par_id], str) - ] - - if parameters: - edata.parameters = np.asarray(parameters, dtype=float) - - if scales: - edata.pscale = amici.parameterScalingFromIntVector(scales) - - if plist: - edata.plist = plist - - ########################################################################## - # fixed parameters preequilibration - if map_preeq_fix: - fixed_pars_preeq = [ - map_preeq_fix[par_id] - for par_id in amici_model.getFixedParameterIds() - ] - edata.fixedParametersPreequilibration = fixed_pars_preeq - - ########################################################################## - # fixed parameters simulation - if map_sim_fix: - fixed_pars_sim = [ - map_sim_fix[par_id] - for par_id in amici_model.getFixedParameterIds() - ] - edata.fixedParameters = fixed_pars_sim - - -def petab_to_amici_scale(petab_scale: str) -> int: - """Convert petab scale id to amici scale id.""" - if petab_scale == LIN: - return amici.ParameterScaling_none - if petab_scale == LOG10: - return amici.ParameterScaling_log10 - if petab_scale == LOG: - return amici.ParameterScaling_ln - raise ValueError(f"PEtab scale not recognized: {petab_scale}") - - -def amici_to_petab_scale(amici_scale: int) -> str: - """Convert amici scale id to petab scale id.""" - if amici_scale == 
amici.ParameterScaling_none: - return LIN - if amici_scale == amici.ParameterScaling_log10: - return LOG10 - if amici_scale == amici.ParameterScaling_ln: - return LOG - raise ValueError(f"AMICI scale not recognized: {amici_scale}") - - -def scale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number: - """Bring parameter from linear scale to target scale. - - :param value: - Value to scale - :param petab_scale: - Target scale of ``value`` - - :return: - ``value`` on target scale - """ - if petab_scale == LIN: - return value - if petab_scale == LOG10: - return np.log10(value) - if petab_scale == LOG: - return np.log(value) - raise ValueError( - f"Unknown parameter scale {petab_scale}. " - f"Must be from {(LIN, LOG, LOG10)}" - ) - - -def unscale_parameter( - value: numbers.Number, petab_scale: str -) -> numbers.Number: - """Bring parameter from scale to linear scale. - - :param value: - Value to scale - :param petab_scale: - Target scale of ``value`` - - :return: - ``value`` on linear scale - """ - if petab_scale == LIN: - return value - if petab_scale == LOG10: - return np.power(10, value) - if petab_scale == LOG: - return np.exp(value) - raise ValueError( - f"Unknown parameter scale {petab_scale}. " - f"Must be from {(LIN, LOG, LOG10)}" - ) - - -def scale_parameters_dict( - value_dict: Dict[Any, numbers.Number], petab_scale_dict: Dict[Any, str] -) -> None: - """ - Bring parameters from linear scale to target scale. - - Bring values in ``value_dict`` from linear scale to the scale - provided in ``petab_scale_dict`` (in-place). - Both arguments are expected to have the same length and matching keys. 
- - :param value_dict: - Values to scale - - :param petab_scale_dict: - Target scales of ``values`` - """ - if value_dict.keys() != petab_scale_dict.keys(): - raise AssertionError("Keys don't match.") - - for key, value in value_dict.items(): - value_dict[key] = scale_parameter(value, petab_scale_dict[key]) - - -def unscale_parameters_dict( - value_dict: Dict[Any, numbers.Number], petab_scale_dict: Dict[Any, str] -) -> None: - """ - Bring parameters from target scale to linear scale. - - Bring values in ``value_dict`` from linear scale to the scale - provided in ``petab_scale_dict`` (in-place). - Both arguments are expected to have the same length and matching keys. - - :param value_dict: - Values to scale - - :param petab_scale_dict: - Target scales of ``values`` - """ - if value_dict.keys() != petab_scale_dict.keys(): - raise AssertionError("Keys don't match.") - for key, value in value_dict.items(): - value_dict[key] = unscale_parameter(value, petab_scale_dict[key]) +from .petab.conditions import ( # noqa # pylint: disable=unused-import + fill_in_parameters, + fill_in_parameters_for_condition, +) +from .petab.parameter_mapping import ( # noqa # pylint: disable=unused-import + ParameterMapping, + ParameterMappingForCondition, + SingleParameterMapping, + SingleScaleMapping, + amici_to_petab_scale, + petab_to_amici_scale, + scale_parameter, + scale_parameters_dict, + unscale_parameter, + unscale_parameters_dict, +) + +warnings.warn( + "Importing amici.parameter_mapping is deprecated. 
Use `amici.petab.parameter_mapping` instead.", + DeprecationWarning, +) + +__all__ = [ + "fill_in_parameters", + "fill_in_parameters_for_condition", + "ParameterMapping", + "ParameterMappingForCondition", + "SingleParameterMapping", + "SingleScaleMapping", + "amici_to_petab_scale", + "petab_to_amici_scale", + "scale_parameter", + "scale_parameters_dict", + "unscale_parameter", + "unscale_parameters_dict", +] diff --git a/python/sdist/amici/petab/__init__.py b/python/sdist/amici/petab/__init__.py new file mode 100644 index 0000000000..6d2201ce3b --- /dev/null +++ b/python/sdist/amici/petab/__init__.py @@ -0,0 +1,35 @@ +"""PEtab import related code.""" + +# ID of model parameter that is to be added to SBML model to indicate +# preequilibration +PREEQ_INDICATOR_ID = "preequilibration_indicator" + +from .petab_import import import_petab_problem +from .simulations import ( + EDATAS, + FIM, + LLH, + RDATAS, + RES, + S2LLH, + SLLH, + SRES, + rdatas_to_measurement_df, + rdatas_to_simulation_df, + simulate_petab, +) + +__all__ = [ + "import_petab_problem", + "simulate_petab", + "rdatas_to_simulation_df", + "rdatas_to_measurement_df", + "LLH", + "SLLH", + "FIM", + "S2LLH", + "RES", + "SRES", + "RDATAS", + "EDATAS", +] diff --git a/python/sdist/amici/petab/cli/__init__.py b/python/sdist/amici/petab/cli/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/sdist/amici/petab/cli/import_petab.py b/python/sdist/amici/petab/cli/import_petab.py new file mode 100644 index 0000000000..db600b0590 --- /dev/null +++ b/python/sdist/amici/petab/cli/import_petab.py @@ -0,0 +1,161 @@ +import argparse + +import petab + +from ..petab_import import import_model_sbml + + +def _parse_cli_args(): + """ + Parse command line arguments + + :return: + Parsed CLI arguments from :mod:`argparse`. + """ + parser = argparse.ArgumentParser( + description="Import PEtab-format model into AMICI." 
+ ) + + # General options: + parser.add_argument( + "-v", + "--verbose", + dest="verbose", + action="store_true", + help="More verbose output", + ) + parser.add_argument( + "-o", + "--output-dir", + dest="model_output_dir", + help="Name of the model directory to create", + ) + parser.add_argument( + "--no-compile", + action="store_false", + dest="compile", + help="Only generate model code, do not compile", + ) + parser.add_argument( + "--no-validate", + action="store_false", + dest="validate", + help="Skip validation of PEtab files", + ) + parser.add_argument( + "--flatten", + dest="flatten", + default=False, + action="store_true", + help="Flatten measurement specific overrides of " + "observable and noise parameters", + ) + parser.add_argument( + "--no-sensitivities", + dest="generate_sensitivity_code", + default=True, + action="store_false", + help="Skip generation of sensitivity code", + ) + + # Call with set of files + parser.add_argument( + "-s", "--sbml", dest="sbml_file_name", help="SBML model filename" + ) + parser.add_argument( + "-m", + "--measurements", + dest="measurement_file_name", + help="Measurement table", + ) + parser.add_argument( + "-c", + "--conditions", + dest="condition_file_name", + help="Conditions table", + ) + parser.add_argument( + "-p", + "--parameters", + dest="parameter_file_name", + help="Parameter table", + ) + parser.add_argument( + "-b", + "--observables", + dest="observable_file_name", + help="Observable table", + ) + + parser.add_argument( + "-y", + "--yaml", + dest="yaml_file_name", + help="PEtab YAML problem filename", + ) + + parser.add_argument( + "-n", + "--model-name", + dest="model_name", + help="Name of the python module generated for the " "model", + ) + + args = parser.parse_args() + + if not args.yaml_file_name and not all( + ( + args.sbml_file_name, + args.condition_file_name, + args.observable_file_name, + ) + ): + parser.error( + "When not specifying a model name or YAML file, then " + "SBML, condition and 
observable file must be specified" + ) + + return args + + +def _main(): + """ + Command line interface to import a model in the PEtab + (https://github.com/PEtab-dev/PEtab/) format into AMICI. + """ + args = _parse_cli_args() + + if args.yaml_file_name: + pp = petab.Problem.from_yaml(args.yaml_file_name) + else: + pp = petab.Problem.from_files( + sbml_file=args.sbml_file_name, + condition_file=args.condition_file_name, + measurement_file=args.measurement_file_name, + parameter_file=args.parameter_file_name, + observable_files=args.observable_file_name, + ) + + # Check for valid PEtab before potentially modifying it + if args.validate: + petab.lint_problem(pp) + + if args.flatten: + petab.flatten_timepoint_specific_output_overrides(pp) + + import_model_sbml( + model_name=args.model_name, + sbml_model=pp.sbml_model, + condition_table=pp.condition_df, + observable_table=pp.observable_df, + measurement_table=pp.measurement_df, + model_output_dir=args.model_output_dir, + compile=args.compile, + generate_sensitivity_code=args.generate_sensitivity_code, + verbose=args.verbose, + validate=False, + ) + + +if __name__ == "__main__": + _main() diff --git a/python/sdist/amici/petab/conditions.py b/python/sdist/amici/petab/conditions.py new file mode 100644 index 0000000000..34dd44cbcb --- /dev/null +++ b/python/sdist/amici/petab/conditions.py @@ -0,0 +1,521 @@ +"""PEtab conditions to AMICI ExpDatas.""" +import logging +import numbers +import warnings +from typing import Union +from collections.abc import Sequence + +import amici +import numpy as np +import pandas as pd +import petab +from amici import AmiciModel +from petab.C import ( + MEASUREMENT, + NOISE_PARAMETERS, + OBSERVABLE_ID, + PREEQUILIBRATION_CONDITION_ID, + SIMULATION_CONDITION_ID, + TIME, +) + +from .parameter_mapping import ( + ParameterMapping, + ParameterMappingForCondition, + petab_to_amici_scale, + scale_parameters_dict, + unscale_parameters_dict, +) +from .util import get_states_in_condition_table + 
logger = logging.getLogger(__name__)

#: Maps one simulation parameter to a fixed numeric value or to the ID of an
#: optimization parameter.
SingleParameterMapping = dict[str, Union[numbers.Number, str]]
#: Maps one simulation parameter to its PEtab parameter scale.
SingleScaleMapping = dict[str, str]


def fill_in_parameters(
    edatas: list[amici.ExpData],
    problem_parameters: dict[str, numbers.Number],
    scaled_parameters: bool,
    parameter_mapping: ParameterMapping,
    amici_model: AmiciModel,
) -> None:
    """Fill fixed and dynamic parameters into the edatas (in-place).

    :param edatas:
        List of experimental datas :class:`amici.amici.ExpData` with
        everything except parameters filled.
    :param problem_parameters:
        Problem parameters as parameterId=>value dict. Only
        parameters included here will be set. Remaining parameters will
        be used as currently set in `amici_model`.
    :param scaled_parameters:
        If True, problem_parameters are assumed to be on the scale provided
        in the parameter mapping. If False, they are assumed
        to be in linear scale.
    :param parameter_mapping:
        Parameter mapping for all conditions.
    :param amici_model:
        AMICI model.
    """
    # Warn about problem parameters the mapping never references; most
    # likely a user error such as a typo in a parameter ID.
    unused = set(problem_parameters) - parameter_mapping.free_symbols
    if unused:
        warnings.warn(
            "The following problem parameters were not used: " + str(unused),
            RuntimeWarning,
        )

    # One ExpData per condition-specific mapping, each filled independently.
    for cur_edata, cur_mapping in zip(edatas, parameter_mapping):
        fill_in_parameters_for_condition(
            cur_edata,
            problem_parameters,
            scaled_parameters,
            cur_mapping,
            amici_model,
        )


def fill_in_parameters_for_condition(
    edata: amici.ExpData,
    problem_parameters: dict[str, numbers.Number],
    scaled_parameters: bool,
    parameter_mapping: ParameterMappingForCondition,
    amici_model: AmiciModel,
) -> None:
    """Fill fixed and dynamic parameters into the edata for condition
    (in-place).

    :param edata:
        Experimental data object to fill parameters into.
    :param problem_parameters:
        Problem parameters as parameterId=>value dict. Only
        parameters included here will be set. Remaining parameters will
        be used as already set in `amici_model` and `edata`.
    :param scaled_parameters:
        If True, problem_parameters are assumed to be on the scale provided
        in the parameter mapping. If False, they
        are assumed to be in linear scale.
    :param parameter_mapping:
        Parameter mapping for current condition.
    :param amici_model:
        AMICI model
    """
    map_sim_var = parameter_mapping.map_sim_var
    scale_map_sim_var = parameter_mapping.scale_map_sim_var
    map_preeq_fix = parameter_mapping.map_preeq_fix
    scale_map_preeq_fix = parameter_mapping.scale_map_preeq_fix
    map_sim_fix = parameter_mapping.map_sim_fix
    scale_map_sim_fix = parameter_mapping.scale_map_sim_fix

    # The mapping may contain parameter IDs as values; those *must* be
    # replaced by numeric values before being passed to AMICI.
    def _resolve(model_par, mapped_value, mapping):
        """Replace parameter IDs in mapping dicts by values from
        problem_parameters where necessary."""
        if isinstance(mapped_value, str):
            try:
                # estimated parameter
                return problem_parameters[mapped_value]
            except KeyError:
                # Condition table overrides must have been handled already
                # (e.g. by the PEtab parameter mapping), but parameters from
                # InitialAssignments may still be present.
                if mapping[mapped_value] == model_par:
                    # prevent infinite recursion
                    raise
                return _resolve(mapped_value, mapping[mapped_value], mapping)
        if model_par in problem_parameters:
            # user-provided
            return problem_parameters[model_par]
        # prevent nan-propagation in derivative
        if np.isnan(mapped_value):
            return 0.0
        # constant value
        return mapped_value

    map_preeq_fix = {
        par_id: _resolve(par_id, val, map_preeq_fix)
        for par_id, val in map_preeq_fix.items()
    }
    map_sim_fix = {
        par_id: _resolve(par_id, val, map_sim_fix)
        for par_id, val in map_sim_fix.items()
    }
    # variable parameters may be resolved through fixed ones as well
    combined = {**map_sim_fix, **map_sim_var}
    map_sim_var = {
        par_id: _resolve(par_id, val, combined)
        for par_id, val in map_sim_var.items()
    }

    if scaled_parameters:
        # Parameters come in on their estimation scale; fixed parameters
        # must be linear for AMICI, so unscale them.
        # Preequilibration variable parameters can be skipped: they are
        # identical with simulation parameters, and only the latter are used
        # from here on.
        unscale_parameters_dict(map_preeq_fix, scale_map_preeq_fix)
        unscale_parameters_dict(map_sim_fix, scale_map_sim_fix)
    else:
        # Scale all variable parameters to the scale they are estimated on
        # and pass that information to amici via edata.{parameters,pscale}.
        # The scaling is necessary to obtain correct derivatives.
        scale_parameters_dict(map_sim_var, scale_map_sim_var)

    ##########################################################################
    # variable parameters and parameter scale
    par_ids = amici_model.getParameterIds()

    # parameter list from mapping dict
    parameters = [map_sim_var[par_id] for par_id in par_ids]

    # scales list from mapping dict
    scales = [
        petab_to_amici_scale(scale_map_sim_var[par_id]) for par_id in par_ids
    ]

    # sensitivity parameter list: indices of symbolically mapped parameters
    plist = [
        ip
        for ip, par_id in enumerate(par_ids)
        if isinstance(parameter_mapping.map_sim_var[par_id], str)
    ]

    if parameters:
        edata.parameters = np.asarray(parameters, dtype=float)

    if scales:
        edata.pscale = amici.parameterScalingFromIntVector(scales)

    if plist:
        edata.plist = plist

    ##########################################################################
    # fixed parameters preequilibration
    if map_preeq_fix:
        edata.fixedParametersPreequilibration = [
            map_preeq_fix[par_id]
            for par_id in amici_model.getFixedParameterIds()
        ]

    ##########################################################################
    # fixed parameters simulation
    if map_sim_fix:
        edata.fixedParameters = [
            map_sim_fix[par_id]
            for par_id in amici_model.getFixedParameterIds()
        ]
To be provided as dict, mapping PEtab + problem parameters to SBML IDs. + :param scaled_parameters: + If ``True``, ``problem_parameters`` are assumed to be on the scale + provided in the PEtab parameter table and will be unscaled. + If ``False``, they are assumed to be in linear scale. + :param parameter_mapping: + Optional precomputed PEtab parameter mapping for efficiency, as + generated by :func:`create_parameter_mapping`. + :param simulation_conditions: + Result of :func:`petab.get_simulation_conditions`. Can be provided to + save time if this has been obtained before. + + :return: + List with one :class:`amici.amici.ExpData` per simulation condition, + with filled in timepoints, data and parameters. + """ + # number of amici simulations will be number of unique + # (preequilibrationConditionId, simulationConditionId) pairs. + # Can be optimized by checking for identical condition vectors. + if simulation_conditions is None: + simulation_conditions = ( + petab_problem.get_simulation_conditions_from_measurement_df() + ) + + # Get parameter mapping + if parameter_mapping is None: + from .parameter_mapping import create_parameter_mapping + + parameter_mapping = create_parameter_mapping( + petab_problem=petab_problem, + simulation_conditions=simulation_conditions, + scaled_parameters=scaled_parameters, + amici_model=amici_model, + ) + + # Generate ExpData with all condition-specific information + edatas = create_edatas( + amici_model=amici_model, + petab_problem=petab_problem, + simulation_conditions=simulation_conditions, + ) + + # Fill parameters in ExpDatas (in-place) + fill_in_parameters( + edatas=edatas, + problem_parameters=problem_parameters, + scaled_parameters=scaled_parameters, + parameter_mapping=parameter_mapping, + amici_model=amici_model, + ) + + return edatas + + +def create_edata_for_condition( + condition: Union[dict, pd.Series], + measurement_df: pd.DataFrame, + amici_model: AmiciModel, + petab_problem: petab.Problem, + observable_ids: list[str], 
+) -> amici.ExpData: + """Get :class:`amici.amici.ExpData` for the given PEtab condition. + + Sets timepoints, observed data and sigmas. + + :param condition: + :class:`pandas.DataFrame` row with ``preequilibrationConditionId`` and + ``simulationConditionId``. + :param measurement_df: + :class:`pandas.DataFrame` with measurements for the given condition. + :param amici_model: + AMICI model + :param petab_problem: + Underlying PEtab problem + :param observable_ids: + List of observable IDs + + :return: + ExpData instance. + """ + if amici_model.nytrue != len(observable_ids): + raise AssertionError( + "Number of AMICI model observables does not " + "match number of PEtab observables." + ) + + # create an ExpData object + edata = amici.ExpData(amici_model) + edata.id = condition[SIMULATION_CONDITION_ID] + if condition.get(PREEQUILIBRATION_CONDITION_ID): + edata.id += "+" + condition.get(PREEQUILIBRATION_CONDITION_ID) + ########################################################################## + # enable initial parameters reinitialization + + states_in_condition_table = get_states_in_condition_table( + petab_problem, condition=condition + ) + if ( + condition.get(PREEQUILIBRATION_CONDITION_ID) + and states_in_condition_table + ): + state_ids = amici_model.getStateIds() + state_idx_reinitalization = [ + state_ids.index(s) + for s, (v, v_preeq) in states_in_condition_table.items() + if not np.isnan(v) + ] + edata.reinitialization_state_idxs_sim = state_idx_reinitalization + logger.debug( + "Enabling state reinitialization for condition " + f"{condition.get(PREEQUILIBRATION_CONDITION_ID, '')} - " + f"{condition.get(SIMULATION_CONDITION_ID)} " + f"{states_in_condition_table}" + ) + + ########################################################################## + # timepoints + + # find replicate numbers of time points + timepoints_w_reps = _get_timepoints_with_replicates( + df_for_condition=measurement_df + ) + edata.setTimepoints(timepoints_w_reps) + + 
########################################################################## + # measurements and sigmas + y, sigma_y = _get_measurements_and_sigmas( + df_for_condition=measurement_df, + timepoints_w_reps=timepoints_w_reps, + observable_ids=observable_ids, + ) + edata.setObservedData(y.flatten()) + edata.setObservedDataStdDev(sigma_y.flatten()) + + return edata + + +def create_edatas( + amici_model: AmiciModel, + petab_problem: petab.Problem, + simulation_conditions: Union[pd.DataFrame, dict] = None, +) -> list[amici.ExpData]: + """Create list of :class:`amici.amici.ExpData` objects for PEtab problem. + + :param amici_model: + AMICI model. + :param petab_problem: + Underlying PEtab problem. + :param simulation_conditions: + Result of :func:`petab.get_simulation_conditions`. Can be provided to + save time if this has be obtained before. + + :return: + List with one :class:`amici.amici.ExpData` per simulation condition, + with filled in timepoints and data. + """ + if simulation_conditions is None: + simulation_conditions = ( + petab_problem.get_simulation_conditions_from_measurement_df() + ) + + observable_ids = amici_model.getObservableIds() + + measurement_groupvar = [SIMULATION_CONDITION_ID] + if PREEQUILIBRATION_CONDITION_ID in simulation_conditions: + measurement_groupvar.append(petab.PREEQUILIBRATION_CONDITION_ID) + measurement_dfs = dict( + list( + petab_problem.measurement_df.fillna( + {PREEQUILIBRATION_CONDITION_ID: ""} + ).groupby(measurement_groupvar) + ) + ) + + edatas = [] + for _, condition in simulation_conditions.iterrows(): + # Create amici.ExpData for each simulation + if PREEQUILIBRATION_CONDITION_ID in condition: + measurement_index = ( + condition.get(SIMULATION_CONDITION_ID), + condition.get(PREEQUILIBRATION_CONDITION_ID) or "", + ) + else: + measurement_index = (condition.get(SIMULATION_CONDITION_ID),) + + edata = create_edata_for_condition( + condition=condition, + amici_model=amici_model, + measurement_df=measurement_dfs[measurement_index], + 
def _get_timepoints_with_replicates(
    df_for_condition: pd.DataFrame,
) -> list[numbers.Number]:
    """
    Get list of timepoints including replicate measurements

    :param df_for_condition:
        PEtab measurement table subset for a single condition.

    :return:
        Sorted list of timepoints, including multiple timepoints accounting
        for replicate measurements.
    """
    # create sorted list of all timepoints for which measurements exist
    timepoints = sorted(df_for_condition[TIME].unique().astype(float))

    # find replicate numbers of time points
    timepoints_w_reps = []
    for time in timepoints:
        # subselect for time
        # NOTE: use the TIME constant instead of the hard-coded `.time`
        # attribute access so the column name stays in one place, and cast
        # to float so the comparison matches the float `timepoints` values
        df_for_time = df_for_condition[
            df_for_condition[TIME].astype(float) == time
        ]
        # rep number is maximum over rep numbers for observables
        n_reps = max(df_for_time.groupby([OBSERVABLE_ID, TIME]).size())
        # append time point n_rep times
        timepoints_w_reps.extend([time] * n_reps)

    return timepoints_w_reps


def _get_measurements_and_sigmas(
    df_for_condition: pd.DataFrame,
    timepoints_w_reps: Sequence[numbers.Number],
    observable_ids: Sequence[str],
) -> tuple[np.ndarray, np.ndarray]:
    """
    Get measurements and sigmas

    Generate arrays with measurements and sigmas in AMICI format from a
    PEtab measurement table subset for a single condition.

    :param df_for_condition:
        Subset of PEtab measurement table for one condition

    :param timepoints_w_reps:
        Timepoints for which there exist measurements, including replicates

    :param observable_ids:
        List of observable IDs for mapping IDs to indices.

    :return:
        arrays for measurement and sigmas
    """
    # prepare measurement matrix (NaN marks "no measurement")
    y = np.full(
        shape=(len(timepoints_w_reps), len(observable_ids)), fill_value=np.nan
    )
    # prepare sigma matrix
    sigma_y = y.copy()

    timepoints = sorted(df_for_condition[TIME].unique().astype(float))

    for time in timepoints:
        # subselect for time
        # cast to float for consistency with `timepoints` (the original
        # compared the raw column to a float, which fails for e.g. string
        # -typed time columns)
        df_for_time = df_for_condition[
            df_for_condition[TIME].astype(float) == time
        ]
        time_ix_0 = timepoints_w_reps.index(time)

        # remember used time indices for each observable
        time_ix_for_obs_ix = {}

        # iterate over measurements
        for _, measurement in df_for_time.iterrows():
            # extract observable index
            observable_ix = observable_ids.index(measurement[OBSERVABLE_ID])

            # update time index for observable (replicates occupy
            # consecutive rows starting at the first index for this time)
            if observable_ix in time_ix_for_obs_ix:
                time_ix_for_obs_ix[observable_ix] += 1
            else:
                time_ix_for_obs_ix[observable_ix] = time_ix_0

            # fill observable and possibly noise parameter
            y[time_ix_for_obs_ix[observable_ix], observable_ix] = measurement[
                MEASUREMENT
            ]
            # only numeric noise parameters are sigmas; symbolic ones are
            # handled via the parameter mapping
            if isinstance(
                measurement.get(NOISE_PARAMETERS, None), numbers.Number
            ):
                sigma_y[
                    time_ix_for_obs_ix[observable_ix], observable_ix
                ] = measurement[NOISE_PARAMETERS]
    return y, sigma_y
import importlib
import logging
import os
import re
from pathlib import Path
from typing import Union

import amici
import pandas as pd
import petab
import sympy as sp
from petab.C import (
    CONDITION_NAME,
    ESTIMATE,
    NOISE_DISTRIBUTION,
    NOISE_FORMULA,
    OBSERVABLE_FORMULA,
    OBSERVABLE_NAME,
    OBSERVABLE_TRANSFORMATION,
)
from petab.parameters import get_valid_parameters_for_parameter_table
from sympy.abc import _clash

logger = logging.getLogger(__name__)


def get_observation_model(
    observable_df: pd.DataFrame,
) -> tuple[
    dict[str, dict[str, str]], dict[str, str], dict[str, Union[str, float]]
]:
    """
    Get observables, sigmas, and noise distributions from PEtab observation
    table in a format suitable for
    :meth:`amici.sbml_import.SbmlImporter.sbml2amici`.

    :param observable_df:
        PEtab observables table

    :return:
        Tuple of dicts with observables, noise distributions, and sigmas.
    """
    if observable_df is None:
        return {}, {}, {}

    observables = {}
    sigmas = {}

    # any-case "nan" strings count as empty; sanitization is needed due to
    # https://github.com/PEtab-dev/PEtab/issues/447
    nan_pat = r"^[nN]a[nN]$"
    for _, row in observable_df.iterrows():
        obs_id = str(row.name)
        obs_name = re.sub(nan_pat, "", str(row.get(OBSERVABLE_NAME, "")))
        obs_formula = re.sub(nan_pat, "", str(row[OBSERVABLE_FORMULA]))
        noise_formula = re.sub(nan_pat, "", str(row[NOISE_FORMULA]))
        observables[obs_id] = {"name": obs_name, "formula": obs_formula}
        sigmas[obs_id] = noise_formula

    # PEtab does currently not allow observables in noiseFormula and AMICI
    # cannot handle states in sigma expressions. Therefore, where possible,
    # replace species occurring in error model definition by observableIds.
    replacements = {
        sp.sympify(obs["formula"], locals=_clash): sp.Symbol(obs_id)
        for obs_id, obs in observables.items()
    }
    for obs_id, formula in sigmas.items():
        sigmas[obs_id] = str(
            sp.sympify(formula, locals=_clash).subs(replacements)
        )

    noise_distrs = petab_noise_distributions_to_amici(observable_df)

    return observables, noise_distrs, sigmas


def petab_noise_distributions_to_amici(
    observable_df: pd.DataFrame,
) -> dict[str, str]:
    """
    Map from the petab to the amici format of noise distribution
    identifiers.

    :param observable_df:
        PEtab observable table

    :return:
        dictionary of observable_id => AMICI noise-distributions
    """
    amici_distrs = {}
    for _, row in observable_df.iterrows():
        # optional "<transformation>-" prefix (e.g. "log-")
        trafo = row.get(OBSERVABLE_TRANSFORMATION)
        prefix = trafo + "-" if isinstance(trafo, str) and trafo else ""

        # noise distribution itself, defaulting to "normal"
        distr = row.get(NOISE_DISTRIBUTION)
        if not (isinstance(distr, str) and distr):
            distr = "normal"

        amici_distrs[row.name] = prefix + distr

    return amici_distrs


def petab_scale_to_amici_scale(scale_str: str) -> int:
    """Convert PEtab parameter scaling string to AMICI scaling integer"""
    scale_map = {
        petab.LIN: amici.ParameterScaling_none,
        petab.LOG: amici.ParameterScaling_ln,
        petab.LOG10: amici.ParameterScaling_log10,
    }
    if scale_str in scale_map:
        return scale_map[scale_str]

    raise ValueError(f"Invalid parameter scale {scale_str}")
def _create_model_name(folder: Union[str, Path]) -> str:
    """
    Create a name for the model.
    Just re-use the last part of the folder.
    """
    return os.path.split(os.path.normpath(folder))[-1]


def _can_import_model(
    model_name: str, model_output_dir: Union[str, Path]
) -> bool:
    """
    Check whether a module of that name can already be imported.
    """
    # try to import (in particular checks version)
    try:
        with amici.add_path(model_output_dir):
            model_module = importlib.import_module(model_name)
    except ModuleNotFoundError:
        return False

    # no need to (re-)compile if the module exposes the model factory
    return hasattr(model_module, "getModel")


def get_fixed_parameters(
    petab_problem: petab.Problem,
    non_estimated_parameters_as_constants=True,
) -> list[str]:
    """
    Determine, set and return fixed model parameters.

    Non-estimated parameters and parameters specified in the condition table
    are turned into constants (unless they are overridden).
    Only global SBML parameters are considered. Local parameters are ignored.

    :param petab_problem:
        The PEtab problem instance

    :param non_estimated_parameters_as_constants:
        Whether parameters marked as non-estimated in PEtab should be
        considered constant in AMICI. Setting this to ``True`` will reduce
        model size and simulation times. If sensitivities with respect to
        those parameters are required, this should be set to ``False``.

    :return:
        Sorted list of IDs of parameters which are to be considered constant.
    """
    # If we have a parameter table, all parameters that are allowed to be
    # listed in the parameter table, but are not marked as estimated, can be
    # turned into AMICI constants.
    # Due to legacy API, we might not always have a parameter table, though.
    fixed_parameters = set()
    if petab_problem.parameter_df is not None:
        all_parameters = get_valid_parameters_for_parameter_table(
            model=petab_problem.model,
            condition_df=petab_problem.condition_df,
            observable_df=petab_problem.observable_df
            if petab_problem.observable_df is not None
            else pd.DataFrame(columns=petab.OBSERVABLE_DF_REQUIRED_COLS),
            measurement_df=petab_problem.measurement_df
            if petab_problem.measurement_df is not None
            else pd.DataFrame(columns=petab.MEASUREMENT_DF_REQUIRED_COLS),
        )
        if non_estimated_parameters_as_constants:
            estimated_parameters = petab_problem.parameter_df.index.values[
                petab_problem.parameter_df[ESTIMATE] == 1
            ]
        else:
            # don't treat parameter table parameters as constants
            estimated_parameters = petab_problem.parameter_df.index.values
        fixed_parameters = set(all_parameters) - set(estimated_parameters)

    # Condition table column names are model parameter IDs, compartment IDs
    # or species IDs. Thereof, all parameters except for any overridden ones
    # should be made constant.
    # (Could potentially still be made constant, but leaving them might
    # increase model reusability)

    # handle parameters in condition table
    condition_df = petab_problem.condition_df
    if condition_df is not None:
        logger.debug(f"Condition table: {condition_df.shape}")

        # remove overridden parameters (`object`-type columns)
        fixed_parameters.update(
            p
            for p in condition_df.columns
            # get rid of conditionName column
            if p != CONDITION_NAME
            # there is no parametric override
            # TODO: could check if the final overriding parameter is
            #  estimated or not, but for now, we skip the parameter if there
            #  is any kind of overriding
            if condition_df[p].dtype != "O"
            # p is a parameter
            and not petab_problem.model.is_state_variable(p)
        )

    # Ensure mentioned parameters exist in the model. Remove additional ones
    # from list
    for fixed_parameter in fixed_parameters.copy():
        # check global parameters
        if not petab_problem.model.has_entity_with_id(fixed_parameter):
            # TODO: could still exist as an output parameter?
            logger.warning(
                f"Column '{fixed_parameter}' used in condition "
                "table but no entity with the corresponding ID "
                "exists. Ignoring."
            )
            fixed_parameters.remove(fixed_parameter)

    # `sorted` already produces a list
    return sorted(fixed_parameters)
Please recompile the model and ensure " + "that these parameters are not treated as constants. Deleting " + "the current model might also resolve this. Parameters: " + f"{amici_ids_free_required.difference(amici_ids_free)}" + ) diff --git a/python/sdist/amici/petab/parameter_mapping.py b/python/sdist/amici/petab/parameter_mapping.py new file mode 100644 index 0000000000..9c527f0395 --- /dev/null +++ b/python/sdist/amici/petab/parameter_mapping.py @@ -0,0 +1,697 @@ +from __future__ import annotations + +""" +Parameter mapping +----------------- + +When performing parameter inference, often parameters need to be mapped from +simulation to estimation parameters, and parameters can differ between +conditions. This can be handled using the `ParameterMapping`. + +Note +~~~~ + +While the parameter mapping can be used directly with AMICI, it was developed +for usage together with PEtab, for which the whole workflow of generating +the mapping is automatized. +""" + +import logging +import numbers +import re +from collections.abc import Sequence +from itertools import chain +from typing import Any, Union +from collections.abc import Collection, Iterator + +import amici +import numpy as np +import pandas as pd +import petab +import sympy as sp +from amici.sbml_import import get_species_initial +from petab.C import * # noqa: F403 +from petab.C import ( + LIN, + PARAMETER_SCALE, + PREEQUILIBRATION_CONDITION_ID, + SIMULATION_CONDITION_ID, +) +from petab.models import MODEL_TYPE_PYSB, MODEL_TYPE_SBML +from sympy.abc import _clash + +from .. import AmiciModel +from . import PREEQ_INDICATOR_ID +from .util import get_states_in_condition_table + +try: + import pysb +except ImportError: + pysb = None + + +logger = logging.getLogger(__name__) + +SingleParameterMapping = dict[str, Union[numbers.Number, str]] +SingleScaleMapping = dict[str, str] + + +class ParameterMappingForCondition: + """Parameter mapping for condition. 
+ + Contains mappings for free parameters, fixed parameters, and fixed + preequilibration parameters, both for parameters and scales. + + In the scale mappings, for each simulation parameter the scale + on which the value is passed (and potentially gradients are to be + returned) is given. In the parameter mappings, for each simulation + parameter a corresponding optimization parameter (or a numeric value) + is given. + + If a mapping is not passed, the parameter mappings are assumed to be empty, + and if a scale mapping is not passed, all scales are set to linear. + + :param map_sim_var: + Mapping for free simulation parameters. + :param scale_map_sim_var: + Scales for free simulation parameters. + :param map_preeq_fix: + Mapping for fixed preequilibration parameters. + :param scale_map_preeq_fix: + Scales for fixed preequilibration parameters. + :param map_sim_fix: + Mapping for fixed simulation parameters. + :param scale_map_sim_fix: + Scales for fixed simulation parameters. + """ + + def __init__( + self, + map_sim_var: SingleParameterMapping = None, + scale_map_sim_var: SingleScaleMapping = None, + map_preeq_fix: SingleParameterMapping = None, + scale_map_preeq_fix: SingleScaleMapping = None, + map_sim_fix: SingleParameterMapping = None, + scale_map_sim_fix: SingleScaleMapping = None, + ): + if map_sim_var is None: + map_sim_var = {} + self.map_sim_var = map_sim_var + + if scale_map_sim_var is None: + scale_map_sim_var = {key: LIN for key in map_sim_var} + self.scale_map_sim_var = scale_map_sim_var + + if map_preeq_fix is None: + map_preeq_fix = {} + self.map_preeq_fix = map_preeq_fix + + if scale_map_preeq_fix is None: + scale_map_preeq_fix = {key: LIN for key in map_preeq_fix} + self.scale_map_preeq_fix = scale_map_preeq_fix + + if map_sim_fix is None: + map_sim_fix = {} + self.map_sim_fix = map_sim_fix + + if scale_map_sim_fix is None: + scale_map_sim_fix = {key: LIN for key in map_sim_fix} + self.scale_map_sim_fix = scale_map_sim_fix + + def 
class ParameterMapping(Sequence):
    r"""Parameter mapping for multiple conditions.

    This can be used like a list of :class:`ParameterMappingForCondition`\ s.

    :param parameter_mappings:
        List of parameter mappings for specific conditions.
    """

    def __init__(
        self, parameter_mappings: list[ParameterMappingForCondition] = None
    ):
        super().__init__()
        if parameter_mappings is None:
            parameter_mappings = []
        self.parameter_mappings = parameter_mappings

    def __iter__(self):
        yield from self.parameter_mappings

    def __getitem__(
        self, item
    ) -> ParameterMapping | ParameterMappingForCondition:
        # slices are wrapped back into a ParameterMapping;
        # single indices return the condition mapping itself
        result = self.parameter_mappings[item]
        if isinstance(result, ParameterMappingForCondition):
            return result
        return ParameterMapping(result)

    def __len__(self):
        return len(self.parameter_mappings)

    def append(
        self, parameter_mapping_for_condition: ParameterMappingForCondition
    ):
        """Append a condition specific parameter mapping."""
        self.parameter_mappings.append(parameter_mapping_for_condition)

    def __repr__(self):
        return f"{self.__class__.__name__}({repr(self.parameter_mappings)})"

    @property
    def free_symbols(self) -> set[str]:
        """Get IDs of all (symbolic) parameters present in this mapping"""
        # use a bound set() so that an empty mapping yields an empty set
        # (the unbound `set.union(*())` raises TypeError)
        return set().union(*(mapping.free_symbols for mapping in self))
petab_to_amici_scale(petab_scale: str) -> int: + """Convert petab scale id to amici scale id.""" + if petab_scale == LIN: + return amici.ParameterScaling_none + if petab_scale == LOG10: + return amici.ParameterScaling_log10 + if petab_scale == LOG: + return amici.ParameterScaling_ln + raise ValueError(f"PEtab scale not recognized: {petab_scale}") + + +def amici_to_petab_scale(amici_scale: int) -> str: + """Convert amici scale id to petab scale id.""" + if amici_scale == amici.ParameterScaling_none: + return LIN + if amici_scale == amici.ParameterScaling_log10: + return LOG10 + if amici_scale == amici.ParameterScaling_ln: + return LOG + raise ValueError(f"AMICI scale not recognized: {amici_scale}") + + +def scale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number: + """Bring parameter from linear scale to target scale. + + :param value: + Value to scale + :param petab_scale: + Target scale of ``value`` + + :return: + ``value`` on target scale + """ + if petab_scale == LIN: + return value + if petab_scale == LOG10: + return np.log10(value) + if petab_scale == LOG: + return np.log(value) + raise ValueError( + f"Unknown parameter scale {petab_scale}. " + f"Must be from {(LIN, LOG, LOG10)}" + ) + + +def unscale_parameter( + value: numbers.Number, petab_scale: str +) -> numbers.Number: + """Bring parameter from scale to linear scale. + + :param value: + Value to scale + :param petab_scale: + Target scale of ``value`` + + :return: + ``value`` on linear scale + """ + if petab_scale == LIN: + return value + if petab_scale == LOG10: + return np.power(10, value) + if petab_scale == LOG: + return np.exp(value) + raise ValueError( + f"Unknown parameter scale {petab_scale}. " + f"Must be from {(LIN, LOG, LOG10)}" + ) + + +def scale_parameters_dict( + value_dict: dict[Any, numbers.Number], petab_scale_dict: dict[Any, str] +) -> None: + """ + Bring parameters from linear scale to target scale. 
+ + Bring values in ``value_dict`` from linear scale to the scale + provided in ``petab_scale_dict`` (in-place). + Both arguments are expected to have the same length and matching keys. + + :param value_dict: + Values to scale + + :param petab_scale_dict: + Target scales of ``values`` + """ + if value_dict.keys() != petab_scale_dict.keys(): + raise AssertionError("Keys don't match.") + + for key, value in value_dict.items(): + value_dict[key] = scale_parameter(value, petab_scale_dict[key]) + + +def unscale_parameters_dict( + value_dict: dict[Any, numbers.Number], petab_scale_dict: dict[Any, str] +) -> None: + """ + Bring parameters from target scale to linear scale. + + Bring values in ``value_dict`` from the scale provided in + ``petab_scale_dict`` to linear scale (in-place). + Both arguments are expected to have the same length and matching keys. + + :param value_dict: + Values to unscale + + :param petab_scale_dict: + Current scales of ``values`` + """ + if value_dict.keys() != petab_scale_dict.keys(): + raise AssertionError("Keys don't match.") + + for key, value in value_dict.items(): + value_dict[key] = unscale_parameter(value, petab_scale_dict[key]) + + +def create_parameter_mapping( + petab_problem: petab.Problem, + simulation_conditions: pd.DataFrame | list[dict], + scaled_parameters: bool, + amici_model: AmiciModel, + **parameter_mapping_kwargs, +) -> ParameterMapping: + """Generate AMICI specific parameter mapping. + + :param petab_problem: + PEtab problem + :param simulation_conditions: + Result of :func:`petab.get_simulation_conditions`. Can be provided to + save time if this has been obtained before. + :param scaled_parameters: + If ``True``, problem_parameters are assumed to be on the scale provided + in the PEtab parameter table and will be unscaled. If ``False``, they + are assumed to be in linear scale. + :param amici_model: + AMICI model. 
+ :param parameter_mapping_kwargs: + Optional keyword arguments passed to + :func:`petab.get_optimization_to_simulation_parameter_mapping`. + To allow changing fixed PEtab problem parameters (``estimate=0``), + use ``fill_fixed_parameters=False``. + :return: + List of the parameter mappings. + """ + if simulation_conditions is None: + simulation_conditions = ( + petab_problem.get_simulation_conditions_from_measurement_df() + ) + if isinstance(simulation_conditions, list): + simulation_conditions = pd.DataFrame(data=simulation_conditions) + + # Because AMICI globalizes all local parameters during model import, + # we need to do that here as well to prevent parameter mapping errors + # (PEtab does currently not care about SBML LocalParameters) + if petab_problem.model.type_id == MODEL_TYPE_SBML: + import libsbml + + if petab_problem.sbml_document: + converter_config = ( + libsbml.SBMLLocalParameterConverter().getDefaultProperties() + ) + petab_problem.sbml_document.convert(converter_config) + else: + logger.debug( + "No petab_problem.sbml_document is set. Cannot " + "convert SBML LocalParameters. If the model contains " + "LocalParameters, parameter mapping will fail." 
+ ) + + default_parameter_mapping_kwargs = { + "warn_unmapped": False, + "scaled_parameters": scaled_parameters, + "allow_timepoint_specific_numeric_noise_parameters": not petab.lint.observable_table_has_nontrivial_noise_formula( + petab_problem.observable_df + ), + } + if parameter_mapping_kwargs is None: + parameter_mapping_kwargs = {} + + prelim_parameter_mapping = ( + petab.get_optimization_to_simulation_parameter_mapping( + condition_df=petab_problem.condition_df, + measurement_df=petab_problem.measurement_df, + parameter_df=petab_problem.parameter_df, + observable_df=petab_problem.observable_df, + mapping_df=petab_problem.mapping_df, + model=petab_problem.model, + simulation_conditions=simulation_conditions, + **dict( + default_parameter_mapping_kwargs, **parameter_mapping_kwargs + ), + ) + ) + + parameter_mapping = ParameterMapping() + for (_, condition), prelim_mapping_for_condition in zip( + simulation_conditions.iterrows(), prelim_parameter_mapping + ): + mapping_for_condition = create_parameter_mapping_for_condition( + prelim_mapping_for_condition, condition, petab_problem, amici_model + ) + parameter_mapping.append(mapping_for_condition) + + return parameter_mapping + + +def create_parameter_mapping_for_condition( + parameter_mapping_for_condition: petab.ParMappingDictQuadruple, + condition: pd.Series | dict, + petab_problem: petab.Problem, + amici_model: AmiciModel, +) -> ParameterMappingForCondition: + """Generate AMICI specific parameter mapping for condition. + + :param parameter_mapping_for_condition: + Preliminary parameter mapping for condition. + :param condition: + :class:`pandas.DataFrame` row with ``preequilibrationConditionId`` and + ``simulationConditionId``. + :param petab_problem: + Underlying PEtab problem. + :param amici_model: + AMICI model. + + :return: + The parameter and parameter scale mappings, for fixed + preequilibration, fixed simulation, and variable simulation + parameters, and then the respective scalings. 
+ """ + ( + condition_map_preeq, + condition_map_sim, + condition_scale_map_preeq, + condition_scale_map_sim, + ) = parameter_mapping_for_condition + logger.debug(f"PEtab mapping: {parameter_mapping_for_condition}") + + if len(condition_map_preeq) != len(condition_scale_map_preeq) or len( + condition_map_sim + ) != len(condition_scale_map_sim): + raise AssertionError( + "Number of parameters and number of parameter " + "scales do not match." + ) + if len(condition_map_preeq) and len(condition_map_preeq) != len( + condition_map_sim + ): + logger.debug(f"Preequilibration parameter map: {condition_map_preeq}") + logger.debug(f"Simulation parameter map: {condition_map_sim}") + raise AssertionError( + "Number of parameters for preequilbration " + "and simulation do not match." + ) + + ########################################################################## + # initial states + # Initial states have been set during model import based on the SBML model. + # If initial states were overwritten in the PEtab condition table, they are + # applied here. + # During model generation, parameters for initial concentrations and + # respective initial assignments have been created for the + # relevant species, here we add these parameters to the parameter mapping. + # In absence of preequilibration this could also be handled via + # ExpData.x0, but in the case of preequilibration this would not allow for + # resetting initial states. + + if states_in_condition_table := get_states_in_condition_table( + petab_problem, condition + ): + # set indicator fixed parameter for preeq + # (we expect here, that this parameter was added during import and + # that it was not added by the user with a different meaning...) 
+ if condition_map_preeq: + condition_map_preeq[PREEQ_INDICATOR_ID] = 1.0 + condition_scale_map_preeq[PREEQ_INDICATOR_ID] = LIN + + condition_map_sim[PREEQ_INDICATOR_ID] = 0.0 + condition_scale_map_sim[PREEQ_INDICATOR_ID] = LIN + + for element_id, ( + value, + preeq_value, + ) in states_in_condition_table.items(): + # for preequilibration + init_par_id = f"initial_{element_id}_preeq" + if ( + condition_id := condition.get(PREEQUILIBRATION_CONDITION_ID) + ) is not None: + _set_initial_state( + petab_problem, + condition_id, + element_id, + init_par_id, + condition_map_preeq, + condition_scale_map_preeq, + preeq_value, + ) + else: + # need to set dummy value for preeq parameter anyways, as it + # is expected below (set to 0, not nan, because will be + # multiplied with indicator variable in initial assignment) + condition_map_sim[init_par_id] = 0.0 + condition_scale_map_sim[init_par_id] = LIN + + # for simulation + condition_id = condition[SIMULATION_CONDITION_ID] + init_par_id = f"initial_{element_id}_sim" + _set_initial_state( + petab_problem, + condition_id, + element_id, + init_par_id, + condition_map_sim, + condition_scale_map_sim, + value, + ) + + ########################################################################## + # separate fixed and variable AMICI parameters, because we may have + # different fixed parameters for preeq and sim condition, but we cannot + # have different variable parameters. without splitting, + # merge_preeq_and_sim_pars_condition below may fail. + # TODO: This can be done already in parameter mapping creation. 
+ variable_par_ids = amici_model.getParameterIds() + fixed_par_ids = amici_model.getFixedParameterIds() + + condition_map_preeq_var, condition_map_preeq_fix = _subset_dict( + condition_map_preeq, variable_par_ids, fixed_par_ids + ) + + ( + condition_scale_map_preeq_var, + condition_scale_map_preeq_fix, + ) = _subset_dict( + condition_scale_map_preeq, variable_par_ids, fixed_par_ids + ) + + condition_map_sim_var, condition_map_sim_fix = _subset_dict( + condition_map_sim, variable_par_ids, fixed_par_ids + ) + + condition_scale_map_sim_var, condition_scale_map_sim_fix = _subset_dict( + condition_scale_map_sim, variable_par_ids, fixed_par_ids + ) + + logger.debug( + "Fixed parameters preequilibration: " f"{condition_map_preeq_fix}" + ) + logger.debug("Fixed parameters simulation: " f"{condition_map_sim_fix}") + logger.debug( + "Variable parameters preequilibration: " f"{condition_map_preeq_var}" + ) + logger.debug("Variable parameters simulation: " f"{condition_map_sim_var}") + + petab.merge_preeq_and_sim_pars_condition( + condition_map_preeq_var, + condition_map_sim_var, + condition_scale_map_preeq_var, + condition_scale_map_sim_var, + condition, + ) + logger.debug(f"Merged: {condition_map_sim_var}") + + parameter_mapping_for_condition = ParameterMappingForCondition( + map_preeq_fix=condition_map_preeq_fix, + map_sim_fix=condition_map_sim_fix, + map_sim_var=condition_map_sim_var, + scale_map_preeq_fix=condition_scale_map_preeq_fix, + scale_map_sim_fix=condition_scale_map_sim_fix, + scale_map_sim_var=condition_scale_map_sim_var, + ) + + return parameter_mapping_for_condition + + +def _set_initial_state( + petab_problem, + condition_id, + element_id, + init_par_id, + par_map, + scale_map, + value, +): + value = petab.to_float_if_float(value) + if pd.isna(value): + if petab_problem.model.type_id == MODEL_TYPE_SBML: + value = _get_initial_state_sbml(petab_problem, element_id) + elif petab_problem.model.type_id == MODEL_TYPE_PYSB: + value = 
_get_initial_state_pysb(petab_problem, element_id) + + try: + value = float(value) + except (ValueError, TypeError): + if sp.nsimplify(value).is_Atom and ( + pysb is None or not isinstance(value, pysb.Component) + ): + # Get rid of multiplication with one + value = sp.nsimplify(value) + else: + raise NotImplementedError( + "Cannot handle non-trivial initial state " + f"expression for {element_id}: {value}" + ) + # this should be a parameter ID + value = str(value) + logger.debug( + f"The species {element_id} has no initial value " + f"defined for the condition {condition_id} in " + "the PEtab conditions table. The initial value is " + f"now set to {value}, which is the initial value " + "defined in the SBML model." + ) + par_map[init_par_id] = value + if isinstance(value, float): + # numeric initial state + scale_map[init_par_id] = petab.LIN + else: + # parametric initial state + scale_map[init_par_id] = petab_problem.parameter_df[ + PARAMETER_SCALE + ].get(value, petab.LIN) + + +def _subset_dict( + full: dict[Any, Any], *args: Collection[Any] +) -> Iterator[dict[Any, Any]]: + """Get subset of dictionary based on provided keys + + :param full: + Dictionary to subset + :param args: + Collections of keys to be contained in the different subsets + + :return: + subsetted dictionary + """ + for keys in args: + yield {key: val for (key, val) in full.items() if key in keys} + + +def _get_initial_state_sbml( + petab_problem: petab.Problem, element_id: str +) -> float | sp.Basic: + import libsbml + + element = petab_problem.sbml_model.getElementBySId(element_id) + type_code = element.getTypeCode() + initial_assignment = petab_problem.sbml_model.getInitialAssignmentBySymbol( + element_id + ) + if initial_assignment: + initial_assignment = sp.sympify( + libsbml.formulaToL3String(initial_assignment.getMath()), + locals=_clash, + ) + if type_code == libsbml.SBML_SPECIES: + value = ( + get_species_initial(element) + if initial_assignment is None + else initial_assignment + ) + 
elif type_code == libsbml.SBML_PARAMETER: + value = ( + element.getValue() + if initial_assignment is None + else initial_assignment + ) + elif type_code == libsbml.SBML_COMPARTMENT: + value = ( + element.getSize() + if initial_assignment is None + else initial_assignment + ) + else: + raise NotImplementedError( + f"Don't know how to handle {element_id} in " + "condition table." + ) + return value + + +def _get_initial_state_pysb( + petab_problem: petab.Problem, element_id: str +) -> float | sp.Symbol: + species_idx = int(re.match(r"__s(\d+)$", element_id)[1]) + species_pattern = petab_problem.model.model.species[species_idx] + from pysb.pattern import match_complex_pattern + + value = next( + ( + initial.value + for initial in petab_problem.model.model.initials + if match_complex_pattern( + initial.pattern, species_pattern, exact=True + ) + ), + 0.0, + ) + if isinstance(value, pysb.Parameter): + if value.name in petab_problem.parameter_df.index: + value = value.name + else: + value = value.value + + return value diff --git a/python/sdist/amici/petab/petab_import.py b/python/sdist/amici/petab/petab_import.py new file mode 100644 index 0000000000..558f51ca15 --- /dev/null +++ b/python/sdist/amici/petab/petab_import.py @@ -0,0 +1,153 @@ +""" +PEtab Import +------------ +Import a model in the :mod:`petab` (https://github.com/PEtab-dev/PEtab) format +into AMICI. 
+""" + +import logging +import os +import shutil +from pathlib import Path +from typing import Union + +import amici +import petab +from petab.models import MODEL_TYPE_PYSB, MODEL_TYPE_SBML + +from ..logging import get_logger +from .import_helpers import _can_import_model, _create_model_name, check_model +from .sbml_import import import_model_sbml + +try: + from .pysb_import import import_model_pysb +except ModuleNotFoundError: + # pysb not available + import_model_pysb = None + + +__all__ = ["import_petab_problem"] + +logger = get_logger(__name__, logging.WARNING) + + +def import_petab_problem( + petab_problem: petab.Problem, + model_output_dir: Union[str, Path, None] = None, + model_name: str = None, + force_compile: bool = False, + non_estimated_parameters_as_constants=True, + **kwargs, +) -> "amici.Model": + """ + Create an AMICI model for a PEtab problem. + + :param petab_problem: + A petab problem containing all relevant information on the model. + + :param model_output_dir: + Directory to write the model code to. It will be created if it doesn't + exist. Defaults to current directory. + + :param model_name: + Name of the generated model module. Defaults to the ID of the model + or the model file name without the extension. + + :param force_compile: + Whether to compile the model even if the target folder is not empty, + or the model exists already. + + :param non_estimated_parameters_as_constants: + Whether parameters marked as non-estimated in PEtab should be + considered constant in AMICI. Setting this to ``True`` will reduce + model size and simulation times. If sensitivities with respect to those + parameters are required, this should be set to ``False``. + + :param kwargs: + Additional keyword arguments to be passed to + :meth:`amici.sbml_import.SbmlImporter.sbml2amici` or + :func:`amici.pysb_import.pysb2amici`, depending on the model type. + + :return: + The imported model. 
+ """ + if petab_problem.model.type_id not in (MODEL_TYPE_SBML, MODEL_TYPE_PYSB): + raise NotImplementedError( + "Unsupported model type " + petab_problem.model.type_id + ) + + if petab_problem.mapping_df is not None: + # It's partially supported. Remove at your own risk... + raise NotImplementedError( + "PEtab v2.0.0 mapping tables are not yet supported." + ) + + model_name = model_name or petab_problem.model.model_id + + if petab_problem.model.type_id == MODEL_TYPE_PYSB and model_name is None: + model_name = petab_problem.pysb_model.name + elif model_name is None and model_output_dir: + model_name = _create_model_name(model_output_dir) + + # generate folder and model name if necessary + if model_output_dir is None: + if petab_problem.model.type_id == MODEL_TYPE_PYSB: + raise ValueError("Parameter `model_output_dir` is required.") + + from .sbml_import import _create_model_output_dir_name + + model_output_dir = _create_model_output_dir_name( + petab_problem.sbml_model, model_name + ) + else: + model_output_dir = os.path.abspath(model_output_dir) + + # create folder + if not os.path.exists(model_output_dir): + os.makedirs(model_output_dir) + + # check if compilation necessary + if force_compile or not _can_import_model(model_name, model_output_dir): + # check if folder exists + if os.listdir(model_output_dir) and not force_compile: + raise ValueError( + f"Cannot compile to {model_output_dir}: not empty. " + "Please assign a different target or set `force_compile`." 
+ ) + + # remove folder if exists + if os.path.exists(model_output_dir): + shutil.rmtree(model_output_dir) + + logger.info(f"Compiling model {model_name} to {model_output_dir}.") + # compile the model + if petab_problem.model.type_id == MODEL_TYPE_PYSB: + import_model_pysb( + petab_problem, + model_name=model_name, + model_output_dir=model_output_dir, + **kwargs, + ) + else: + import_model_sbml( + petab_problem=petab_problem, + model_name=model_name, + model_output_dir=model_output_dir, + non_estimated_parameters_as_constants=non_estimated_parameters_as_constants, + **kwargs, + ) + + # import model + model_module = amici.import_model_module(model_name, model_output_dir) + model = model_module.getModel() + check_model(amici_model=model, petab_problem=petab_problem) + + logger.info( + f"Successfully loaded model {model_name} " f"from {model_output_dir}." + ) + + return model + + +# for backwards compatibility +import_model = import_model_sbml diff --git a/python/sdist/amici/petab/petab_problem.py b/python/sdist/amici/petab/petab_problem.py new file mode 100644 index 0000000000..8ea177ad03 --- /dev/null +++ b/python/sdist/amici/petab/petab_problem.py @@ -0,0 +1,275 @@ +"""PEtab-problem based simulations.""" +import copy +from typing import Optional, Union + +import amici +import pandas as pd +import petab +from petab.C import PREEQUILIBRATION_CONDITION_ID, SIMULATION_CONDITION_ID + +from .conditions import create_edatas, fill_in_parameters +from .parameter_mapping import create_parameter_mapping + + +class PetabProblem: + """Manage experimental conditions based on a PEtab problem definition. + + Create :class:`ExpData` objects from a PEtab problem definition, and handle + parameter scales and parameter mapping. + + :param petab_problem: PEtab problem definition. + :param amici_model: AMICI model + :param problem_parameters: Problem parameters to use for simulation + (default: PEtab nominal values and model values). 
+ :param scaled_parameters: Whether the provided parameters are on PEtab + `parameterScale` or not. + :param simulation_conditions: Simulation conditions to use for simulation. + It can be used to subset the conditions in the PEtab problem. + All subsequent operations will only be performed on that subset. + By default, all conditions are used. + :param store_edatas: Whether to create and store all `ExpData` objects for + all conditions upfront. If set to ``False``, `ExpData` objects will be + created and disposed of on the fly during simulation. The latter saves + memory if the given PEtab problem comprises many simulation conditions. + """ + + def __init__( + self, + petab_problem: petab.Problem, + amici_model: Optional[amici.Model] = None, + problem_parameters: Optional[dict[str, float]] = None, + scaled_parameters: bool = False, + simulation_conditions: Union[pd.DataFrame, list[dict]] = None, + store_edatas: bool = True, + ): + self._petab_problem = copy.deepcopy(petab_problem) + + if amici_model is not None: + self._amici_model = amici_model + else: + from .petab_import import import_petab_problem + + self._amici_model = import_petab_problem(petab_problem) + + self._scaled_parameters = scaled_parameters + + self._simulation_conditions = simulation_conditions or ( + petab_problem.get_simulation_conditions_from_measurement_df() + ) + if not isinstance(self._simulation_conditions, pd.DataFrame): + self._simulation_conditions = pd.DataFrame( + self._simulation_conditions + ) + if ( + preeq_id := PREEQUILIBRATION_CONDITION_ID + ) in self._simulation_conditions: + self._simulation_conditions[ + preeq_id + ] = self._simulation_conditions[preeq_id].fillna("") + + if problem_parameters is None: + # Use PEtab nominal values as default + self._problem_parameters = self._default_parameters() + if scaled_parameters: + raise NotImplementedError( + "scaled_parameters=True in combination with default " + "parameters is not implemented yet." 
+ ) + else: + self._problem_parameters = problem_parameters + + if store_edatas: + self._parameter_mapping = create_parameter_mapping( + petab_problem=self._petab_problem, + simulation_conditions=self._simulation_conditions, + scaled_parameters=self._scaled_parameters, + amici_model=self._amici_model, + ) + self._create_edatas() + else: + self._parameter_mapping = None + self._edatas = None + + def set_parameters( + self, + problem_parameters: dict[str, float], + scaled_parameters: bool = False, + ): + """Set problem parameters. + + :param problem_parameters: Problem parameters to use for simulation. + This may be a subset of all parameters. + :param scaled_parameters: Whether the provided parameters are on PEtab + `parameterScale` or not. + """ + if scaled_parameters != self._scaled_parameters and self._parameter_mapping is not None: + # redo parameter mapping if scale changed + self._parameter_mapping = create_parameter_mapping( + petab_problem=self._petab_problem, + simulation_conditions=self._simulation_conditions, + scaled_parameters=scaled_parameters, + amici_model=self._amici_model, + ) + + if set(self._problem_parameters) - set(problem_parameters): + # not all parameters are provided - update + # bring previously set parameters to the same scale if necessary + if scaled_parameters and not self._scaled_parameters: + self._problem_parameters = ( + self._petab_problem.scale_parameters( + self._problem_parameters, + ) + ) + elif not scaled_parameters and self._scaled_parameters: + self._problem_parameters = ( + self._petab_problem.unscale_parameters( + self._problem_parameters, + ) + ) + self._problem_parameters |= problem_parameters + else: + self._problem_parameters = problem_parameters + + self._scaled_parameters = scaled_parameters + + if self._edatas: + fill_in_parameters( + edatas=self._edatas, + problem_parameters=self._problem_parameters, + scaled_parameters=self._scaled_parameters, + parameter_mapping=self._parameter_mapping, + 
amici_model=self._amici_model, + ) + + def get_edata( + self, condition_id: str, preequilibration_condition_id: str = None + ) -> amici.ExpData: + """Get ExpData object for a given condition. + + NOTE: If ``store_edatas=True`` was passed to the constructor and the + returned object is modified, the changes will be reflected in the + internal `ExpData` objects. Also, if parameter values of + `PetabProblem` are changed, all `ExpData` objects will be updated. + Create a deep copy if you want to avoid this. + + :param condition_id: PEtab condition ID + :param preequilibration_condition_id: PEtab preequilibration condition ID + :return: ExpData object + """ + # exists or has to be created? + if self._edatas: + edata_id = condition_id + if preequilibration_condition_id: + edata_id += "+" + preequilibration_condition_id + + for edata in self._edatas: + if edata.id == edata_id: + return edata + + return self._create_edata(condition_id, preequilibration_condition_id) + + def get_edatas(self): + """Get all ExpData objects. + + NOTE: If ``store_edatas=True`` was passed to the constructor and the + returned objects are modified, the changes will be reflected in the + internal `ExpData` objects. Also, if parameter values of + `PetabProblem` are changed, all `ExpData` objects will be updated. + Create a deep copy if you want to avoid this. + + :return: List of ExpData objects + """ + if self._edatas: + # shallow copy + return self._edatas.copy() + + # not storing edatas - create and return + self._parameter_mapping = create_parameter_mapping( + petab_problem=self._petab_problem, + simulation_conditions=self._simulation_conditions, + scaled_parameters=self._scaled_parameters, + amici_model=self._amici_model, + ) + self._create_edatas() + result = self._edatas + self._edatas = [] + return result + + def _create_edata( + self, condition_id: str, preequilibration_condition_id: str + ) -> amici.ExpData: + """Create ExpData object for a given condition. 
+ + :param condition_id: PEtab condition ID + :param preequilibration_condition_id: PEtab preequilibration condition ID + :return: ExpData object + """ + simulation_condition = pd.DataFrame( + [ + { + SIMULATION_CONDITION_ID: condition_id, + PREEQUILIBRATION_CONDITION_ID: preequilibration_condition_id + or None, + } + ] + ) + edatas = create_edatas( + amici_model=self._amici_model, + petab_problem=self._petab_problem, + simulation_conditions=simulation_condition, + ) + parameter_mapping = create_parameter_mapping( + petab_problem=self._petab_problem, + simulation_conditions=simulation_condition, + scaled_parameters=self._scaled_parameters, + amici_model=self._amici_model, + ) + + # Fill parameters in ExpDatas (in-place) + fill_in_parameters( + edatas=edatas, + problem_parameters={ + p: self._problem_parameters[p] + for p in parameter_mapping.free_symbols + if p in self._problem_parameters + }, + scaled_parameters=self._scaled_parameters, + parameter_mapping=parameter_mapping, + amici_model=self._amici_model, + ) + + if len(edatas) != 1: + raise AssertionError("Expected exactly one ExpData object.") + return edatas[0] + + def _create_edatas( + self, + ): + """Create ExpData objects from PEtab problem definition.""" + self._edatas = create_edatas( + amici_model=self._amici_model, + petab_problem=self._petab_problem, + simulation_conditions=self._simulation_conditions, + ) + + fill_in_parameters( + edatas=self._edatas, + problem_parameters=self._problem_parameters, + scaled_parameters=self._scaled_parameters, + parameter_mapping=self._parameter_mapping, + amici_model=self._amici_model, + ) + + def _default_parameters(self) -> dict[str, float]: + """Get unscaled default parameters.""" + return { + t.Index: getattr(t, petab.NOMINAL_VALUE) + for t in self._petab_problem.parameter_df[ + self._petab_problem.parameter_df[petab.ESTIMATE] == 1 + ].itertuples() + } + + @property + def model(self) -> amici.Model: + """AMICI model.""" + return self._amici_model diff --git 
a/python/sdist/amici/petab/pysb_import.py b/python/sdist/amici/petab/pysb_import.py new file mode 100644 index 0000000000..8c67bb0785 --- /dev/null +++ b/python/sdist/amici/petab/pysb_import.py @@ -0,0 +1,274 @@ +""" +PySB-PEtab Import +----------------- +Import a model in the PySB-adapted :mod:`petab` +(https://github.com/PEtab-dev/PEtab) format into AMICI. +""" + +import logging +import re +from pathlib import Path +from typing import Optional, Union + +import petab +import pysb +import pysb.bng +import sympy as sp +from petab.C import CONDITION_NAME, NOISE_FORMULA, OBSERVABLE_FORMULA +from petab.models.pysb_model import PySBModel + +from ..logging import get_logger, log_execution_time, set_log_level +from . import PREEQ_INDICATOR_ID +from .import_helpers import ( + get_fixed_parameters, + petab_noise_distributions_to_amici, +) +from .util import get_states_in_condition_table + +logger = get_logger(__name__, logging.WARNING) + + +def _add_observation_model( + pysb_model: pysb.Model, petab_problem: petab.Problem +): + """Extend PySB model by observation model as defined in the PEtab + observables table""" + + # add any required output parameters + local_syms = { + sp.Symbol.__str__(comp): comp + for comp in pysb_model.components + if isinstance(comp, sp.Symbol) + } + for formula in [ + *petab_problem.observable_df[OBSERVABLE_FORMULA], + *petab_problem.observable_df[NOISE_FORMULA], + ]: + sym = sp.sympify(formula, locals=local_syms) + for s in sym.free_symbols: + if not isinstance(s, pysb.Component): + p = pysb.Parameter(str(s), 1.0) + pysb_model.add_component(p) + local_syms[sp.Symbol.__str__(p)] = p + + # add observables and sigmas to pysb model + for observable_id, observable_formula, noise_formula in zip( + petab_problem.observable_df.index, + petab_problem.observable_df[OBSERVABLE_FORMULA], + petab_problem.observable_df[NOISE_FORMULA], + ): + obs_symbol = sp.sympify(observable_formula, locals=local_syms) + if observable_id in pysb_model.expressions.keys(): + 
obs_expr = pysb_model.expressions[observable_id] + else: + obs_expr = pysb.Expression(observable_id, obs_symbol) + pysb_model.add_component(obs_expr) + local_syms[observable_id] = obs_expr + + sigma_id = f"{observable_id}_sigma" + sigma_symbol = sp.sympify(noise_formula, locals=local_syms) + sigma_expr = pysb.Expression(sigma_id, sigma_symbol) + pysb_model.add_component(sigma_expr) + local_syms[sigma_id] = sigma_expr + + +def _add_initialization_variables( + pysb_model: pysb.Model, petab_problem: petab.Problem +): + """Add initialization variables to the PySB model to support initial + conditions specified in the PEtab condition table. + + To parameterize initial states, we currently need initial assignments. + If they occur in the condition table, we create a new parameter + initial_${speciesID}. Feels dirty and should be changed (see also #924). + """ + + initial_states = get_states_in_condition_table(petab_problem) + fixed_parameters = [] + if initial_states: + # add preequilibration indicator variable + # NOTE: would only be required if we actually have preequilibration + # adding it anyways. can be optimized-out later + if PREEQ_INDICATOR_ID in [c.name for c in pysb_model.components]: + raise AssertionError( + "Model already has a component with ID " + f"{PREEQ_INDICATOR_ID}. Cannot handle " + "species and compartments in condition table " + "then." + ) + preeq_indicator = pysb.Parameter(PREEQ_INDICATOR_ID) + pysb_model.add_component(preeq_indicator) + # Can only reset parameters after preequilibration if they are fixed. 
+ fixed_parameters.append(PREEQ_INDICATOR_ID) + logger.debug( + "Adding preequilibration indicator constant " + f"{PREEQ_INDICATOR_ID}" + ) + logger.debug(f"Adding initial assignments for {initial_states.keys()}") + + for assignee_id in initial_states: + init_par_id_preeq = f"initial_{assignee_id}_preeq" + init_par_id_sim = f"initial_{assignee_id}_sim" + for init_par_id in [init_par_id_preeq, init_par_id_sim]: + if init_par_id in [c.name for c in pysb_model.components]: + raise ValueError( + "Cannot create parameter for initial assignment " + f"for {assignee_id} because an entity named " + f"{init_par_id} exists already in the model." + ) + p = pysb.Parameter(init_par_id) + pysb_model.add_component(p) + + species_idx = int(re.match(r"__s(\d+)$", assignee_id)[1]) + # use original model here since that's what was used to generate + # the ids in initial_states + species_pattern = petab_problem.model.model.species[species_idx] + + # species pattern comes from the _original_ model, but we only want + # to modify pysb_model, so we have to reconstitute the pattern using + # pysb_model + for c in pysb_model.components: + globals()[c.name] = c + species_pattern = pysb.as_complex_pattern(eval(str(species_pattern))) + + from pysb.pattern import match_complex_pattern + + formula = pysb.Expression( + f"initial_{assignee_id}_formula", + preeq_indicator * pysb_model.parameters[init_par_id_preeq] + + (1 - preeq_indicator) * pysb_model.parameters[init_par_id_sim], + ) + pysb_model.add_component(formula) + + for initial in pysb_model.initials: + if match_complex_pattern( + initial.pattern, species_pattern, exact=True + ): + logger.debug( + "The PySB model has an initial defined for species " + f"{assignee_id}, but this species also has an initial " + "value defined in the PEtab condition table. The SBML " + "initial assignment will be overwritten to handle " + "preequilibration and initial values specified by the " + "PEtab problem." 
+ ) + initial.value = formula + break + else: + # No initial in the pysb model, so add one + init = pysb.Initial(species_pattern, formula) + pysb_model.add_component(init) + + return fixed_parameters + + +@log_execution_time("Importing PEtab model", logger) +def import_model_pysb( + petab_problem: petab.Problem, + model_output_dir: Optional[Union[str, Path]] = None, + verbose: Optional[Union[bool, int]] = True, + model_name: Optional[str] = None, + **kwargs, +) -> None: + """ + Create AMICI model from PySB-PEtab problem + + :param petab_problem: + PySB PEtab problem + + :param model_output_dir: + Directory to write the model code to. Will be created if doesn't + exist. Defaults to current directory. + + :param verbose: + Print/log extra information. + + :param model_name: + Name of the generated model module + + :param kwargs: + Additional keyword arguments to be passed to + :func:`amici.pysb_import.pysb2amici`. + """ + set_log_level(logger, verbose) + + logger.info("Importing model ...") + + if not isinstance(petab_problem.model, PySBModel): + raise ValueError("Not a PySB model") + + # need to create a copy here as we don't want to modify the original + pysb.SelfExporter.cleanup() + og_export = pysb.SelfExporter.do_export + pysb.SelfExporter.do_export = False + pysb_model = pysb.Model( + base=petab_problem.model.model, + name=petab_problem.model.model_id, + ) + + _add_observation_model(pysb_model, petab_problem) + # generate species for the _original_ model + pysb.bng.generate_equations(petab_problem.model.model) + fixed_parameters = _add_initialization_variables(pysb_model, petab_problem) + pysb.SelfExporter.do_export = og_export + + # check condition table for supported features, important to use pysb_model + # here, as we want to also cover output parameters + model_parameters = [p.name for p in pysb_model.parameters] + condition_species_parameters = get_states_in_condition_table( + petab_problem, return_patterns=True + ) + for x in 
petab_problem.condition_df.columns: + if x == CONDITION_NAME: + continue + + x = petab.mapping.resolve_mapping(petab_problem.mapping_df, x) + + # parameters + if x in model_parameters: + continue + + # species/pattern + if x in condition_species_parameters: + continue + + raise NotImplementedError( + "For PySB PEtab import, only model parameters and species, but " + "not compartments are allowed in the condition table. Offending " + f"column: {x}" + ) + + constant_parameters = ( + get_fixed_parameters(petab_problem) + fixed_parameters + ) + + if petab_problem.observable_df is None: + observables = None + sigmas = None + noise_distrs = None + else: + observables = [ + expr.name + for expr in pysb_model.expressions + if expr.name in petab_problem.observable_df.index + ] + + sigmas = {obs_id: f"{obs_id}_sigma" for obs_id in observables} + + noise_distrs = petab_noise_distributions_to_amici( + petab_problem.observable_df + ) + + from amici.pysb_import import pysb2amici + + pysb2amici( + model=pysb_model, + output_dir=model_output_dir, + model_name=model_name, + verbose=True, + observables=observables, + sigmas=sigmas, + constant_parameters=constant_parameters, + noise_distributions=noise_distrs, + **kwargs, + ) diff --git a/python/sdist/amici/petab/sbml_import.py b/python/sdist/amici/petab/sbml_import.py new file mode 100644 index 0000000000..6388d6f8b0 --- /dev/null +++ b/python/sdist/amici/petab/sbml_import.py @@ -0,0 +1,553 @@ +import logging +import math +import os +import tempfile +from itertools import chain +from pathlib import Path +from typing import Optional, Union +from warnings import warn + +import amici +import libsbml +import pandas as pd +import petab +import sympy as sp +from _collections import OrderedDict +from amici.logging import log_execution_time, set_log_level +from petab.models import MODEL_TYPE_SBML +from sympy.abc import _clash + +from . 
import PREEQ_INDICATOR_ID +from .import_helpers import ( + check_model, + get_fixed_parameters, + get_observation_model, +) +from .util import get_states_in_condition_table + +logger = logging.getLogger(__name__) + + +@log_execution_time("Importing PEtab model", logger) +def import_model_sbml( + sbml_model: Union[str, Path, "libsbml.Model"] = None, + condition_table: Optional[Union[str, Path, pd.DataFrame]] = None, + observable_table: Optional[Union[str, Path, pd.DataFrame]] = None, + measurement_table: Optional[Union[str, Path, pd.DataFrame]] = None, + petab_problem: petab.Problem = None, + model_name: Optional[str] = None, + model_output_dir: Optional[Union[str, Path]] = None, + verbose: Optional[Union[bool, int]] = True, + allow_reinit_fixpar_initcond: bool = True, + validate: bool = True, + non_estimated_parameters_as_constants=True, + output_parameter_defaults: Optional[dict[str, float]] = None, + discard_sbml_annotations: bool = False, + **kwargs, +) -> amici.SbmlImporter: + """ + Create AMICI model from PEtab problem + + :param sbml_model: + PEtab SBML model or SBML file name. + Deprecated, pass ``petab_problem`` instead. + + :param condition_table: + PEtab condition table. If provided, parameters from there will be + turned into AMICI constant parameters (i.e. parameters w.r.t. which + no sensitivities will be computed). + Deprecated, pass ``petab_problem`` instead. + + :param observable_table: + PEtab observable table. Deprecated, pass ``petab_problem`` instead. + + :param measurement_table: + PEtab measurement table. Deprecated, pass ``petab_problem`` instead. + + :param petab_problem: + PEtab problem. + + :param model_name: + Name of the generated model. If model file name was provided, + this defaults to the file name without extension, otherwise + the SBML model ID will be used. + + :param model_output_dir: + Directory to write the model code to. Will be created if doesn't + exist. Defaults to current directory. 
+ + :param verbose: + Print/log extra information. + + :param allow_reinit_fixpar_initcond: + See :class:`amici.de_export.ODEExporter`. Must be enabled if initial + states are to be reset after preequilibration. + + :param validate: + Whether to validate the PEtab problem + + :param non_estimated_parameters_as_constants: + Whether parameters marked as non-estimated in PEtab should be + considered constant in AMICI. Setting this to ``True`` will reduce + model size and simulation times. If sensitivities with respect to those + parameters are required, this should be set to ``False``. + + :param output_parameter_defaults: + Optional default parameter values for output parameters introduced in + the PEtab observables table, in particular for placeholder parameters. + dictionary mapping parameter IDs to default values. + + :param discard_sbml_annotations: + Discard information contained in AMICI SBML annotations (debug). + + :param kwargs: + Additional keyword arguments to be passed to + :meth:`amici.sbml_import.SbmlImporter.sbml2amici`. + + :return: + The created :class:`amici.sbml_import.SbmlImporter` instance. + """ + from petab.models.sbml_model import SbmlModel + + set_log_level(logger, verbose) + + logger.info("Importing model ...") + + if any([sbml_model, condition_table, observable_table, measurement_table]): + warn( + "The `sbml_model`, `condition_table`, `observable_table`, and " + "`measurement_table` arguments are deprecated and will be " + "removed in a future version. Use `petab_problem` instead.", + DeprecationWarning, + stacklevel=2, + ) + if petab_problem: + raise ValueError( + "Must not pass a `petab_problem` argument in " + "combination with any of `sbml_model`, " + "`condition_table`, `observable_table`, or " + "`measurement_table`." 
+ ) + + petab_problem = petab.Problem( + model=SbmlModel(sbml_model) + if isinstance(sbml_model, libsbml.Model) + else SbmlModel.from_file(sbml_model), + condition_df=petab.get_condition_df(condition_table), + observable_df=petab.get_observable_df(observable_table), + ) + + if petab_problem.observable_df is None: + raise NotImplementedError( + "PEtab import without observables table " + "is currently not supported." + ) + + assert isinstance(petab_problem.model, SbmlModel) + + if validate: + logger.info("Validating PEtab problem ...") + petab.lint_problem(petab_problem) + + # Model name from SBML ID or filename + if model_name is None: + if not (model_name := petab_problem.model.sbml_model.getId()): + if not isinstance(sbml_model, (str, Path)): + raise ValueError( + "No `model_name` was provided and no model " + "ID was specified in the SBML model." + ) + model_name = os.path.splitext(os.path.split(sbml_model)[-1])[0] + + if model_output_dir is None: + model_output_dir = os.path.join( + os.getcwd(), f"{model_name}-amici{amici.__version__}" + ) + + logger.info( + f"Model name is '{model_name}'.\n" + f"Writing model code to '{model_output_dir}'." + ) + + # Create a copy, because it will be modified by SbmlImporter + sbml_doc = petab_problem.model.sbml_model.getSBMLDocument().clone() + sbml_model = sbml_doc.getModel() + + show_model_info(sbml_model) + + sbml_importer = amici.SbmlImporter( + sbml_model, + discard_annotations=discard_sbml_annotations, + ) + sbml_model = sbml_importer.sbml + + allow_n_noise_pars = ( + not petab.lint.observable_table_has_nontrivial_noise_formula( + petab_problem.observable_df + ) + ) + if ( + petab_problem.measurement_df is not None + and petab.lint.measurement_table_has_timepoint_specific_mappings( + petab_problem.measurement_df, + allow_scalar_numeric_noise_parameters=allow_n_noise_pars, + ) + ): + raise ValueError( + "AMICI does not support importing models with timepoint specific " + "mappings for noise or observable parameters. 
Please flatten " + "the problem and try again." + ) + + if petab_problem.observable_df is not None: + observables, noise_distrs, sigmas = get_observation_model( + petab_problem.observable_df + ) + else: + observables = noise_distrs = sigmas = None + + logger.info(f"Observables: {len(observables)}") + logger.info(f"Sigmas: {len(sigmas)}") + + if len(sigmas) != len(observables): + raise AssertionError( + f"Number of provided observables ({len(observables)}) and sigmas " + f"({len(sigmas)}) do not match." + ) + + # TODO: adding extra output parameters is currently not supported, + # so we add any output parameters to the SBML model. + # this should be changed to something more elegant + # + formulas = chain( + (val["formula"] for val in observables.values()), sigmas.values() + ) + output_parameters = OrderedDict() + for formula in formulas: + # we want reproducible parameter ordering upon repeated import + free_syms = sorted( + sp.sympify(formula, locals=_clash).free_symbols, + key=lambda symbol: symbol.name, + ) + for free_sym in free_syms: + sym = str(free_sym) + if ( + sbml_model.getElementBySId(sym) is None + and sym != "time" + and sym not in observables + ): + output_parameters[sym] = None + logger.debug( + "Adding output parameters to model: " + f"{list(output_parameters.keys())}" + ) + output_parameter_defaults = output_parameter_defaults or {} + if extra_pars := ( + set(output_parameter_defaults) - set(output_parameters.keys()) + ): + raise ValueError( + f"Default output parameter values were given for {extra_pars}, " + "but they those are not output parameters." + ) + + for par in output_parameters.keys(): + _add_global_parameter( + sbml_model=sbml_model, + parameter_id=par, + value=output_parameter_defaults.get(par, 0.0), + ) + # + + # TODO: to parameterize initial states or compartment sizes, we currently + # need initial assignments. if they occur in the condition table, we + # create a new parameter initial_${speciesOrCompartmentID}. 
+ # feels dirty and should be changed (see also #924) + # + + initial_states = get_states_in_condition_table(petab_problem) + fixed_parameters = [] + if initial_states: + # add preequilibration indicator variable + # NOTE: would only be required if we actually have preequilibration + # adding it anyways. can be optimized-out later + if sbml_model.getParameter(PREEQ_INDICATOR_ID) is not None: + raise AssertionError( + "Model already has a parameter with ID " + f"{PREEQ_INDICATOR_ID}. Cannot handle " + "species and compartments in condition table " + "then." + ) + indicator = sbml_model.createParameter() + indicator.setId(PREEQ_INDICATOR_ID) + indicator.setName(PREEQ_INDICATOR_ID) + # Can only reset parameters after preequilibration if they are fixed. + fixed_parameters.append(PREEQ_INDICATOR_ID) + logger.debug( + "Adding preequilibration indicator " + f"constant {PREEQ_INDICATOR_ID}" + ) + logger.debug(f"Adding initial assignments for {initial_states.keys()}") + for assignee_id in initial_states: + init_par_id_preeq = f"initial_{assignee_id}_preeq" + init_par_id_sim = f"initial_{assignee_id}_sim" + for init_par_id in [init_par_id_preeq, init_par_id_sim]: + if sbml_model.getElementBySId(init_par_id) is not None: + raise ValueError( + "Cannot create parameter for initial assignment " + f"for {assignee_id} because an entity named " + f"{init_par_id} exists already in the model." + ) + init_par = sbml_model.createParameter() + init_par.setId(init_par_id) + init_par.setName(init_par_id) + assignment = sbml_model.getInitialAssignment(assignee_id) + if assignment is None: + assignment = sbml_model.createInitialAssignment() + assignment.setSymbol(assignee_id) + else: + logger.debug( + "The SBML model has an initial assignment defined " + f"for model entity {assignee_id}, but this entity " + "also has an initial value defined in the PEtab " + "condition table. 
The SBML initial assignment will " + "be overwritten to handle preequilibration and " + "initial values specified by the PEtab problem." + ) + formula = ( + f"{PREEQ_INDICATOR_ID} * {init_par_id_preeq} " + f"+ (1 - {PREEQ_INDICATOR_ID}) * {init_par_id_sim}" + ) + math_ast = libsbml.parseL3Formula(formula) + assignment.setMath(math_ast) + # + + fixed_parameters.extend( + _get_fixed_parameters_sbml( + petab_problem=petab_problem, + non_estimated_parameters_as_constants=non_estimated_parameters_as_constants, + ) + ) + + logger.debug(f"Fixed parameters are {fixed_parameters}") + logger.info(f"Overall fixed parameters: {len(fixed_parameters)}") + logger.info( + "Variable parameters: " + + str(len(sbml_model.getListOfParameters()) - len(fixed_parameters)) + ) + + # Create Python module from SBML model + sbml_importer.sbml2amici( + model_name=model_name, + output_dir=model_output_dir, + observables=observables, + constant_parameters=fixed_parameters, + sigmas=sigmas, + allow_reinit_fixpar_initcond=allow_reinit_fixpar_initcond, + noise_distributions=noise_distrs, + verbose=verbose, + **kwargs, + ) + + if kwargs.get( + "compile", + amici._get_default_argument(sbml_importer.sbml2amici, "compile"), + ): + # check that the model extension was compiled successfully + model_module = amici.import_model_module(model_name, model_output_dir) + model = model_module.getModel() + check_model(amici_model=model, petab_problem=petab_problem) + + return sbml_importer + + +def show_model_info(sbml_model: "libsbml.Model"): + """Log some model quantities""" + + logger.info(f"Species: {len(sbml_model.getListOfSpecies())}") + logger.info( + "Global parameters: " + str(len(sbml_model.getListOfParameters())) + ) + logger.info(f"Reactions: {len(sbml_model.getListOfReactions())}") + + +# TODO - remove?! 
+def species_to_parameters( + species_ids: list[str], sbml_model: "libsbml.Model" +) -> list[str]: + """ + Turn a SBML species into parameters and replace species references + inside the model instance. + + :param species_ids: + list of SBML species ID to convert to parameters with the same ID as + the replaced species. + + :param sbml_model: + SBML model to modify + + :return: + list of IDs of species which have been converted to parameters + """ + transformables = [] + + for species_id in species_ids: + species = sbml_model.getSpecies(species_id) + + if species.getHasOnlySubstanceUnits(): + logger.warning( + f"Ignoring {species.getId()} which has only substance units." + " Conversion not yet implemented." + ) + continue + + if math.isnan(species.getInitialConcentration()): + logger.warning( + f"Ignoring {species.getId()} which has no initial " + "concentration. Amount conversion not yet implemented." + ) + continue + + transformables.append(species_id) + + # Must not remove species while iterating over getListOfSpecies() + for species_id in transformables: + species = sbml_model.removeSpecies(species_id) + par = sbml_model.createParameter() + par.setId(species.getId()) + par.setName(species.getName()) + par.setConstant(True) + par.setValue(species.getInitialConcentration()) + par.setUnits(species.getUnits()) + + # Remove from reactants and products + for reaction in sbml_model.getListOfReactions(): + for species_id in transformables: + # loop, since removeX only removes one instance + while reaction.removeReactant(species_id): + # remove from reactants + pass + while reaction.removeProduct(species_id): + # remove from products + pass + while reaction.removeModifier(species_id): + # remove from modifiers + pass + + return transformables + + +def _add_global_parameter( + sbml_model: libsbml.Model, + parameter_id: str, + parameter_name: str = None, + constant: bool = False, + units: str = "dimensionless", + value: float = 0.0, +) -> libsbml.Parameter: + """Add new 
global parameter to SBML model + + Arguments: + sbml_model: SBML model + parameter_id: ID of the new parameter + parameter_name: Name of the new parameter + constant: Is parameter constant? + units: SBML unit ID + value: parameter value + + Returns: + The created parameter + """ + if parameter_name is None: + parameter_name = parameter_id + + p = sbml_model.createParameter() + p.setId(parameter_id) + p.setName(parameter_name) + p.setConstant(constant) + p.setValue(value) + p.setUnits(units) + return p + + +def _get_fixed_parameters_sbml( + petab_problem: petab.Problem, + non_estimated_parameters_as_constants=True, +) -> list[str]: + """ + Determine, set and return fixed model parameters. + + Non-estimated parameters and parameters specified in the condition table + are turned into constants (unless they are overridden). + Only global SBML parameters are considered. Local parameters are ignored. + + :param petab_problem: + The PEtab problem instance + + :param non_estimated_parameters_as_constants: + Whether parameters marked as non-estimated in PEtab should be + considered constant in AMICI. Setting this to ``True`` will reduce + model size and simulation times. If sensitivities with respect to those + parameters are required, this should be set to ``False``. + + :return: + list of IDs of parameters which are to be considered constant. + """ + if not petab_problem.model.type_id == MODEL_TYPE_SBML: + raise ValueError("Not an SBML model.") + # initial concentrations for species or initial compartment sizes in + # condition table will need to be turned into fixed parameters + + # if there is no initial assignment for that species, we'd need + # to create one. 
to avoid any naming collision right away, we don't + # allow that for now + + # we can't handle them yet + compartments = [ + col + for col in petab_problem.condition_df + if petab_problem.model.sbml_model.getCompartment(col) is not None + ] + if compartments: + raise NotImplementedError( + "Can't handle initial compartment sizes " + "at the moment. Consider creating an " + f"initial assignment for {compartments}" + ) + + fixed_parameters = get_fixed_parameters( + petab_problem, non_estimated_parameters_as_constants + ) + + # exclude targets of rules or initial assignments + sbml_model = petab_problem.model.sbml_model + for fixed_parameter in fixed_parameters.copy(): + # check global parameters + if sbml_model.getInitialAssignmentBySymbol( + fixed_parameter + ) or sbml_model.getRuleByVariable(fixed_parameter): + fixed_parameters.remove(fixed_parameter) + + return list(sorted(fixed_parameters)) + + +def _create_model_output_dir_name( + sbml_model: "libsbml.Model", model_name: Optional[str] = None +) -> Path: + """ + Find a folder for storing the compiled amici model. + If possible, use the sbml model id, otherwise create a random folder. + The folder will be located in the `amici_models` subfolder of the current + folder. + """ + BASE_DIR = Path("amici_models").absolute() + BASE_DIR.mkdir(exist_ok=True) + # try model_name + if model_name: + return BASE_DIR / model_name + + # try sbml model id + if sbml_model_id := sbml_model.getId(): + return BASE_DIR / sbml_model_id + + # create random folder name + return Path(tempfile.mkdtemp(dir=BASE_DIR)) diff --git a/python/sdist/amici/petab/simulations.py b/python/sdist/amici/petab/simulations.py new file mode 100644 index 0000000000..0f9aae3bfd --- /dev/null +++ b/python/sdist/amici/petab/simulations.py @@ -0,0 +1,483 @@ +"""Functionality related to simulation of PEtab problems. + +Functionality related to running simulations or evaluating the objective +function as defined by a PEtab problem. 
+""" +import copy +import logging +from typing import Any, Optional, Union +from collections.abc import Sequence + +import amici +import numpy as np +import pandas as pd +import petab +from petab.C import * # noqa: F403 + +from .. import AmiciExpData, AmiciModel +from ..logging import get_logger, log_execution_time + +# some extra imports for backward-compatibility +# DEPRECATED: remove in 1.0 +from .conditions import ( # noqa # pylint: disable=unused-import + create_edata_for_condition, + create_edatas, + create_parameterized_edatas, + fill_in_parameters, +) +from .parameter_mapping import ( # noqa # pylint: disable=unused-import + ParameterMapping, + create_parameter_mapping, + create_parameter_mapping_for_condition, +) +from .util import ( # noqa # pylint: disable=unused-import + get_states_in_condition_table, +) + +# END DEPRECATED + +try: + import pysb +except ImportError: + pysb = None + +logger = get_logger(__name__) + + +# string constant definitions +LLH = "llh" +SLLH = "sllh" +FIM = "fim" +S2LLH = "s2llh" +RES = "res" +SRES = "sres" +RDATAS = "rdatas" +EDATAS = "edatas" + + +__all__ = [ + "simulate_petab", + "LLH", + "SLLH", + "FIM", + "S2LLH", + "RES", + "SRES", + "RDATAS", + "EDATAS", +] + + +@log_execution_time("Simulating PEtab model", logger) +def simulate_petab( + petab_problem: petab.Problem, + amici_model: AmiciModel, + solver: Optional[amici.Solver] = None, + problem_parameters: Optional[dict[str, float]] = None, + simulation_conditions: Union[pd.DataFrame, dict] = None, + edatas: list[AmiciExpData] = None, + parameter_mapping: ParameterMapping = None, + scaled_parameters: Optional[bool] = False, + log_level: int = logging.WARNING, + num_threads: int = 1, + failfast: bool = True, + scaled_gradients: bool = False, +) -> dict[str, Any]: + """Simulate PEtab model. + + .. note:: + Regardless of `scaled_parameters`, unscaled sensitivities are returned, + unless `scaled_gradients=True`. + + :param petab_problem: + PEtab problem to work on. 
+ :param amici_model: + AMICI Model assumed to be compatible with ``petab_problem``. + :param solver: + An AMICI solver. Will use default options if None. + :param problem_parameters: + Run simulation with these parameters. If ``None``, PEtab + ``nominalValues`` will be used. To be provided as dict, mapping PEtab + problem parameters to SBML IDs. + :param simulation_conditions: + Result of :py:func:`petab.get_simulation_conditions`. Can be provided + to save time if this has be obtained before. + Not required if ``edatas`` and ``parameter_mapping`` are provided. + :param edatas: + Experimental data. Parameters are inserted in-place for simulation. + :param parameter_mapping: + Optional precomputed PEtab parameter mapping for efficiency, as + generated by :py:func:`create_parameter_mapping` with + ``scaled_parameters=True``. + :param scaled_parameters: + If ``True``, ``problem_parameters`` are assumed to be on the scale + provided in the PEtab parameter table and will be unscaled. + If ``False``, they are assumed to be in linear scale. + If `parameter_mapping` is provided, this must match the value of + `scaled_parameters` used to generate the mapping. + :param log_level: + Log level, see :mod:`amici.logging` module. + :param num_threads: + Number of threads to use for simulating multiple conditions + (only used if compiled with OpenMP). + :param failfast: + Returns as soon as an integration failure is encountered, skipping + any remaining simulations. + :param scaled_gradients: + Whether to compute gradients on parameter scale (``True``) or not + (``False``). + + :return: + Dictionary of + + * cost function value (``LLH``), + * list of :class:`amici.amici.ReturnData` (``RDATAS``), + * list of :class:`amici.amici.ExpData` (``EDATAS``), + + corresponding to the different simulation conditions. + For ordering of simulation conditions, see + :meth:`petab.Problem.get_simulation_conditions_from_measurement_df`. 
+ """ + logger.setLevel(log_level) + + if solver is None: + solver = amici_model.getSolver() + + # number of amici simulations will be number of unique + # (preequilibrationConditionId, simulationConditionId) pairs. + # Can be optimized by checking for identical condition vectors. + if ( + simulation_conditions is None + and parameter_mapping is None + and edatas is None + ): + simulation_conditions = ( + petab_problem.get_simulation_conditions_from_measurement_df() + ) + + # Get parameter mapping + if parameter_mapping is None: + parameter_mapping = create_parameter_mapping( + petab_problem=petab_problem, + simulation_conditions=simulation_conditions, + # we will always use scaled parameters internally + scaled_parameters=True, + amici_model=amici_model, + ) + + if problem_parameters is None: + # scaled PEtab nominal values + problem_parameters = dict( + zip( + petab_problem.x_ids, + petab_problem.x_nominal_scaled, + ) + ) + # depending on `fill_fixed_parameters` for parameter mapping, the + # parameter mapping may contain values instead of symbols for fixed + # parameters. In this case, we need to filter them here to avoid + # warnings in `fill_in_parameters`. 
+ free_parameters = parameter_mapping.free_symbols + problem_parameters = { + par_id: par_value + for par_id, par_value in problem_parameters.items() + if par_id in free_parameters + } + + elif not scaled_parameters: + problem_parameters = petab_problem.scale_parameters(problem_parameters) + + scaled_parameters = True + + # Get edatas + if edatas is None: + # Generate ExpData with all condition-specific information + edatas = create_edatas( + amici_model=amici_model, + petab_problem=petab_problem, + simulation_conditions=simulation_conditions, + ) + + # Fill parameters in ExpDatas (in-place) + fill_in_parameters( + edatas=edatas, + problem_parameters=problem_parameters, + scaled_parameters=scaled_parameters, + parameter_mapping=parameter_mapping, + amici_model=amici_model, + ) + + # Simulate + rdatas = amici.runAmiciSimulations( + amici_model, + solver, + edata_list=edatas, + num_threads=num_threads, + failfast=failfast, + ) + + # Compute total llh + llh = sum(rdata["llh"] for rdata in rdatas) + # Compute total sllh + sllh = None + if solver.getSensitivityOrder() != amici.SensitivityOrder.none: + sllh = aggregate_sllh( + amici_model=amici_model, + rdatas=rdatas, + parameter_mapping=parameter_mapping, + petab_scale=scaled_parameters, + petab_problem=petab_problem, + edatas=edatas, + ) + if not scaled_gradients and sllh is not None: + sllh = { + parameter_id: rescale_sensitivity( + sensitivity=sensitivity, + parameter_value=problem_parameters[parameter_id], + old_scale=petab_problem.parameter_df.loc[ + parameter_id, PARAMETER_SCALE + ], + new_scale=LIN, + ) + for parameter_id, sensitivity in sllh.items() + } + + # Log results + sim_cond = petab_problem.get_simulation_conditions_from_measurement_df() + for i, rdata in enumerate(rdatas): + sim_cond_id = "N/A" if sim_cond.empty else sim_cond.iloc[i, :].values + logger.debug( + f"Condition: {sim_cond_id}, status: {rdata['status']}, " + f"llh: {rdata['llh']}" + ) + + return { + LLH: llh, + SLLH: sllh, + RDATAS: rdatas, + 
EDATAS: edatas, + } + + +def aggregate_sllh( + amici_model: AmiciModel, + rdatas: Sequence[amici.ReturnDataView], + parameter_mapping: Optional[ParameterMapping], + edatas: list[AmiciExpData], + petab_scale: bool = True, + petab_problem: petab.Problem = None, +) -> Union[None, dict[str, float]]: + """ + Aggregate likelihood gradient for all conditions, according to PEtab + parameter mapping. + + :param amici_model: + AMICI model from which ``rdatas`` were obtained. + :param rdatas: + Simulation results. + :param parameter_mapping: + PEtab parameter mapping to condition-specific simulation parameters. + :param edatas: + Experimental data used for simulation. + :param petab_scale: + Whether to check that sensitivities were computed with parameters on + the scales provided in the PEtab parameters table. + :param petab_problem: + The PEtab problem that defines the parameter scales. + + :return: + Aggregated likelihood sensitivities. + """ + accumulated_sllh = {} + model_parameter_ids = amici_model.getParameterIds() + + if petab_scale and petab_problem is None: + raise ValueError( + "Please provide the PEtab problem, when using " + "`petab_scale=True`." + ) + + # Check for issues in all condition simulation results. + for rdata in rdatas: + # Condition failed during simulation. + if rdata.status != amici.AMICI_SUCCESS: + return None + # Condition simulation result does not provide SLLH. + if rdata.sllh is None: + raise ValueError( + "The sensitivities of the likelihood for a condition were " + "not computed." + ) + + for condition_parameter_mapping, edata, rdata in zip( + parameter_mapping, edatas, rdatas + ): + for sllh_parameter_index, condition_parameter_sllh in enumerate( + rdata.sllh + ): + # Get PEtab parameter ID + # Use ExpData if it provides a parameter list, else default to + # Model. 
+ if edata.plist: + model_parameter_index = edata.plist[sllh_parameter_index] + else: + model_parameter_index = amici_model.plist(sllh_parameter_index) + model_parameter_id = model_parameter_ids[model_parameter_index] + petab_parameter_id = condition_parameter_mapping.map_sim_var[ + model_parameter_id + ] + + # Initialize + if petab_parameter_id not in accumulated_sllh: + accumulated_sllh[petab_parameter_id] = 0 + + # Check that the scale is consistent + if petab_scale: + # `ParameterMappingForCondition` objects provide the scale in + # terms of `petab.C` constants already, not AMICI equivalents. + model_parameter_scale = ( + condition_parameter_mapping.scale_map_sim_var[ + model_parameter_id + ] + ) + petab_parameter_scale = petab_problem.parameter_df.loc[ + petab_parameter_id, PARAMETER_SCALE + ] + if model_parameter_scale != petab_parameter_scale: + raise ValueError( + f"The scale of the parameter `{petab_parameter_id}` " + "differs between the AMICI model " + f"({model_parameter_scale}) and the PEtab problem " + f"({petab_parameter_scale})." + ) + + # Accumulate + accumulated_sllh[petab_parameter_id] += condition_parameter_sllh + + return accumulated_sllh + + +def rescale_sensitivity( + sensitivity: float, + parameter_value: float, + old_scale: str, + new_scale: str, +) -> float: + """Rescale a sensitivity between parameter scales. + + :param sensitivity: + The sensitivity corresponding to the parameter value. + :param parameter_value: + The parameter vector element, on ``old_scale``. + :param old_scale: + The scale of the parameter value. + :param new_scale: + The parameter scale on which to rescale the sensitivity. + + :return: + The rescaled sensitivity. 
+ """ + LOG_E_10 = np.log(10) + + if old_scale == new_scale: + return sensitivity + + unscaled_parameter_value = petab.parameters.unscale( + parameter=parameter_value, + scale_str=old_scale, + ) + + scale = { + (LIN, LOG): lambda s: s * unscaled_parameter_value, + (LOG, LIN): lambda s: s / unscaled_parameter_value, + (LIN, LOG10): lambda s: s * (unscaled_parameter_value * LOG_E_10), + (LOG10, LIN): lambda s: s / (unscaled_parameter_value * LOG_E_10), + } + + scale[(LOG, LOG10)] = lambda s: scale[(LIN, LOG10)](scale[(LOG, LIN)](s)) + scale[(LOG10, LOG)] = lambda s: scale[(LIN, LOG)](scale[(LOG10, LIN)](s)) + + if (old_scale, new_scale) not in scale: + raise NotImplementedError( + f"Old scale: {old_scale}. New scale: {new_scale}." + ) + + return scale[(old_scale, new_scale)](sensitivity) + + +def rdatas_to_measurement_df( + rdatas: Sequence[amici.ReturnData], + model: AmiciModel, + measurement_df: pd.DataFrame, +) -> pd.DataFrame: + """ + Create a measurement dataframe in the PEtab format from the passed + ``rdatas`` and own information. + + :param rdatas: + A sequence of rdatas with the ordering of + :func:`petab.get_simulation_conditions`. + + :param model: + AMICI model used to generate ``rdatas``. + + :param measurement_df: + PEtab measurement table used to generate ``rdatas``. + + :return: + A dataframe built from the rdatas in the format of ``measurement_df``. 
+ """ + simulation_conditions = petab.get_simulation_conditions(measurement_df) + + observable_ids = model.getObservableIds() + rows = [] + # iterate over conditions + for (_, condition), rdata in zip(simulation_conditions.iterrows(), rdatas): + # current simulation matrix + y = rdata.y + # time array used in rdata + t = list(rdata.ts) + + # extract rows for condition + cur_measurement_df = petab.get_rows_for_condition( + measurement_df, condition + ) + + # iterate over entries for the given condition + # note: this way we only generate a dataframe entry for every + # row that existed in the original dataframe. if we want to + # e.g. have also timepoints non-existent in the original file, + # we need to instead iterate over the rdata['y'] entries + for _, row in cur_measurement_df.iterrows(): + # copy row + row_sim = copy.deepcopy(row) + + # extract simulated measurement value + timepoint_idx = t.index(row[TIME]) + observable_idx = observable_ids.index(row[OBSERVABLE_ID]) + measurement_sim = y[timepoint_idx, observable_idx] + + # change measurement entry + row_sim[MEASUREMENT] = measurement_sim + + rows.append(row_sim) + + return pd.DataFrame(rows) + + +def rdatas_to_simulation_df( + rdatas: Sequence[amici.ReturnData], + model: AmiciModel, + measurement_df: pd.DataFrame, +) -> pd.DataFrame: + """Create a PEtab simulation dataframe from + :class:`amici.amici.ReturnData` s. 
+ + See :func:`rdatas_to_measurement_df` for details, only that model outputs + will appear in column ``simulation`` instead of ``measurement``.""" + + df = rdatas_to_measurement_df( + rdatas=rdatas, model=model, measurement_df=measurement_df + ) + + return df.rename(columns={MEASUREMENT: SIMULATION}) diff --git a/python/sdist/amici/petab/simulator.py b/python/sdist/amici/petab/simulator.py new file mode 100644 index 0000000000..a5f50112cc --- /dev/null +++ b/python/sdist/amici/petab/simulator.py @@ -0,0 +1,109 @@ +""" +PEtab Simulator +--------------- +Functionality related to the use of AMICI for simulation with :class:`petab.Simulator`. + +Use cases: + +- generate data for use with PEtab's plotting methods +- generate synthetic data +""" + +import inspect +import sys +from typing import Callable + +import pandas as pd +import petab +from amici import AmiciModel, SensitivityMethod_none + +from .petab_import import import_petab_problem +from .simulations import RDATAS, rdatas_to_measurement_df, simulate_petab + +AMICI_MODEL = "amici_model" +AMICI_SOLVER = "solver" +MODEL_NAME = "model_name" +MODEL_OUTPUT_DIR = "model_output_dir" + +PETAB_PROBLEM = "petab_problem" + + +class PetabSimulator(petab.simulate.Simulator): + """Implementation of the PEtab `Simulator` class that uses AMICI.""" + + def __init__(self, *args, amici_model: AmiciModel = None, **kwargs): + super().__init__(*args, **kwargs) + self.amici_model = amici_model + + def simulate_without_noise(self, **kwargs) -> pd.DataFrame: + """ + See :py:func:`petab.simulate.Simulator.simulate()` docstring. + + Additional keyword arguments can be supplied to specify arguments for + the AMICI PEtab import, simulate, and export methods. See the + docstrings for the respective methods for argument options: + - :py:func:`amici.petab_import.import_petab_problem`, and + - :py:func:`amici.petab_objective.simulate_petab`. 
+ + Note that some arguments are expected to have already been specified + in the Simulator constructor (including the PEtab problem). + """ + if AMICI_MODEL in {*kwargs, *dir(self)} and ( + any( + k in kwargs + for k in inspect.signature(import_petab_problem).parameters + ) + ): + print( + "Arguments related to the PEtab import are unused if " + f"`{AMICI_MODEL}` is specified, or the " + "`PetabSimulator.simulate()` method was previously called." + ) + + kwargs[PETAB_PROBLEM] = self.petab_problem + + # The AMICI model instance for the PEtab problem is saved in the state, + # such that it need not be supplied with each request for simulated + # data. Any user-supplied AMICI model will overwrite the model saved + # in the state. + if AMICI_MODEL not in kwargs: + if self.amici_model is None: + if MODEL_NAME not in kwargs: + kwargs[MODEL_NAME] = AMICI_MODEL + # If the model name is the name of a module that is already + # cached, it can cause issues during import. + while kwargs[MODEL_NAME] in sys.modules: + kwargs[MODEL_NAME] += str(self.rng.integers(10)) + if MODEL_OUTPUT_DIR not in kwargs: + kwargs[MODEL_OUTPUT_DIR] = self.working_dir + self.amici_model = _subset_call(import_petab_problem, kwargs) + kwargs[AMICI_MODEL] = self.amici_model + self.amici_model = kwargs[AMICI_MODEL] + + if AMICI_SOLVER not in kwargs: + kwargs[AMICI_SOLVER] = self.amici_model.getSolver() + kwargs[AMICI_SOLVER].setSensitivityMethod(SensitivityMethod_none) + + result = _subset_call(simulate_petab, kwargs) + return rdatas_to_measurement_df( + result[RDATAS], self.amici_model, self.petab_problem.measurement_df + ) + + +def _subset_call(method: Callable, kwargs: dict): + """ + Helper function to call a method with the intersection of arguments in the + method signature and the supplied arguments. + + :param method: + The method to be called. + :param kwargs: + The argument superset as a dictionary, similar to ``**kwargs`` in + method signatures. 
+ :return: + The output of ``method``, called with the applicable arguments in + ``kwargs``. + """ + method_args = inspect.signature(method).parameters + subset_kwargs = {k: v for k, v in kwargs.items() if k in method_args} + return method(**subset_kwargs) diff --git a/python/sdist/amici/petab/util.py b/python/sdist/amici/petab/util.py new file mode 100644 index 0000000000..742f7bdfe3 --- /dev/null +++ b/python/sdist/amici/petab/util.py @@ -0,0 +1,106 @@ +"""Various helper functions for working with PEtab problems.""" +import re +from typing import TYPE_CHECKING, Union + +import libsbml +import pandas as pd +import petab +from petab.C import PREEQUILIBRATION_CONDITION_ID, SIMULATION_CONDITION_ID +from petab.mapping import resolve_mapping +from petab.models import MODEL_TYPE_PYSB, MODEL_TYPE_SBML + +if TYPE_CHECKING: + pysb = None + + +def get_states_in_condition_table( + petab_problem: petab.Problem, + condition: Union[dict, pd.Series] = None, + return_patterns: bool = False, +) -> dict[str, tuple[Union[float, str, None], Union[float, str, None]]]: + """Get states and their initial condition as specified in the condition table. 
+ + Returns: Dictionary: ``stateId -> (initial condition simulation, initial condition preequilibration)`` + """ + if petab_problem.model.type_id not in (MODEL_TYPE_SBML, MODEL_TYPE_PYSB): + raise NotImplementedError() + + species_check_funs = { + MODEL_TYPE_SBML: lambda x: _element_is_sbml_state( + petab_problem.sbml_model, x + ), + MODEL_TYPE_PYSB: lambda x: _element_is_pysb_pattern( + petab_problem.model.model, x + ), + } + states = { + resolve_mapping(petab_problem.mapping_df, col): (None, None) + if condition is None + else ( + petab_problem.condition_df.loc[ + condition[SIMULATION_CONDITION_ID], col + ], + petab_problem.condition_df.loc[ + condition[PREEQUILIBRATION_CONDITION_ID], col + ] + if PREEQUILIBRATION_CONDITION_ID in condition + else None, + ) + for col in petab_problem.condition_df.columns + if species_check_funs[petab_problem.model.type_id]( + resolve_mapping(petab_problem.mapping_df, col) + ) + } + + if petab_problem.model.type_id == MODEL_TYPE_PYSB: + if return_patterns: + return states + import pysb.pattern + + if not petab_problem.model.model.species: + import pysb.bng + + pysb.bng.generate_equations(petab_problem.model.model) + + try: + spm = pysb.pattern.SpeciesPatternMatcher( + model=petab_problem.model.model + ) + except NotImplementedError: + raise NotImplementedError( + "Requires https://github.com/pysb/pysb/pull/570. 
" + "To use this functionality, update pysb via " + "`pip install git+https://github.com/FFroehlich/pysb@fix_pattern_matching`" + ) + + # expose model components as variables so we can evaluate patterns + for c in petab_problem.model.model.components: + globals()[c.name] = c + + states = { + f"__s{ix}": value + for pattern, value in states.items() + for ix in spm.match(eval(pattern), index=True, exact=True) + } + return states + + +def _element_is_pysb_pattern(model: "pysb.Model", element: str) -> bool: + """Check if element is a pysb pattern""" + if match := re.match(r"[a-zA-Z_][\w_]*\(", element): + return match[0][:-1] in [m.name for m in model.monomers] + return False + + +def _element_is_sbml_state(sbml_model: libsbml.Model, sbml_id: str) -> bool: + """Does the element with ID `sbml_id` correspond to a state variable?""" + if sbml_model.getCompartment(sbml_id) is not None: + return True + if sbml_model.getSpecies(sbml_id) is not None: + return True + if ( + rule := sbml_model.getRuleByVariable(sbml_id) + ) is not None and rule.getTypeCode() == libsbml.SBML_RATE_RULE: + return True + + return False diff --git a/python/sdist/amici/petab_import.py b/python/sdist/amici/petab_import.py index 23fe4394f0..d5c67753ac 100644 --- a/python/sdist/amici/petab_import.py +++ b/python/sdist/amici/petab_import.py @@ -3,1052 +3,43 @@ ------------ Import a model in the :mod:`petab` (https://github.com/PEtab-dev/PEtab) format into AMICI. 
-""" -import argparse -import importlib -import logging -import math -import os -import re -import shutil -import tempfile -from itertools import chain -from pathlib import Path -from typing import Dict, List, Optional, Tuple, Union -from warnings import warn - -import amici -import libsbml -import pandas as pd -import petab -import sympy as sp -from _collections import OrderedDict -from amici.logging import get_logger, log_execution_time, set_log_level -from petab.C import * -from petab.models import MODEL_TYPE_PYSB, MODEL_TYPE_SBML -from petab.parameters import get_valid_parameters_for_parameter_table -from sympy.abc import _clash - -from .petab_util import PREEQ_INDICATOR_ID, get_states_in_condition_table - -try: - from amici.petab_import_pysb import import_model_pysb -except ModuleNotFoundError: - # pysb not available - import_model_pysb = None - -logger = get_logger(__name__, logging.WARNING) - - -def _add_global_parameter( - sbml_model: libsbml.Model, - parameter_id: str, - parameter_name: str = None, - constant: bool = False, - units: str = "dimensionless", - value: float = 0.0, -) -> libsbml.Parameter: - """Add new global parameter to SBML model - - Arguments: - sbml_model: SBML model - parameter_id: ID of the new parameter - parameter_name: Name of the new parameter - constant: Is parameter constant? - units: SBML unit ID - value: parameter value - - Returns: - The created parameter - """ - if parameter_name is None: - parameter_name = parameter_id - - p = sbml_model.createParameter() - p.setId(parameter_id) - p.setName(parameter_name) - p.setConstant(constant) - p.setValue(value) - p.setUnits(units) - return p - - -def get_fixed_parameters( - petab_problem: petab.Problem, - non_estimated_parameters_as_constants=True, -) -> List[str]: - """ - Determine, set and return fixed model parameters. - - Non-estimated parameters and parameters specified in the condition table - are turned into constants (unless they are overridden). 
- Only global SBML parameters are considered. Local parameters are ignored. - - :param petab_problem: - The PEtab problem instance - - :param non_estimated_parameters_as_constants: - Whether parameters marked as non-estimated in PEtab should be - considered constant in AMICI. Setting this to ``True`` will reduce - model size and simulation times. If sensitivities with respect to those - parameters are required, this should be set to ``False``. - - :return: - List of IDs of parameters which are to be considered constant. - """ - if petab_problem.model.type_id == MODEL_TYPE_SBML: - # initial concentrations for species or initial compartment sizes in - # condition table will need to be turned into fixed parameters - - # if there is no initial assignment for that species, we'd need - # to create one. to avoid any naming collision right away, we don't - # allow that for now - - # we can't handle them yet - compartments = [ - col - for col in petab_problem.condition_df - if petab_problem.model.sbml_model.getCompartment(col) is not None - ] - if compartments: - raise NotImplementedError( - "Can't handle initial compartment sizes " - "at the moment. 
Consider creating an " - f"initial assignment for {compartments}" - ) - - # if we have a parameter table, all parameters that are allowed to be - # listed in the parameter table, but are not marked as estimated, can be - # turned into AMICI constants - # due to legacy API, we might not always have a parameter table, though - fixed_parameters = set() - if petab_problem.parameter_df is not None: - all_parameters = get_valid_parameters_for_parameter_table( - model=petab_problem.model, - condition_df=petab_problem.condition_df, - observable_df=petab_problem.observable_df - if petab_problem.observable_df is not None - else pd.DataFrame(columns=petab.OBSERVABLE_DF_REQUIRED_COLS), - measurement_df=petab_problem.measurement_df - if petab_problem.measurement_df is not None - else pd.DataFrame(columns=petab.MEASUREMENT_DF_REQUIRED_COLS), - ) - if non_estimated_parameters_as_constants: - estimated_parameters = petab_problem.parameter_df.index.values[ - petab_problem.parameter_df[ESTIMATE] == 1 - ] - else: - # don't treat parameter table parameters as constants - estimated_parameters = petab_problem.parameter_df.index.values - fixed_parameters = set(all_parameters) - set(estimated_parameters) - - # Column names are model parameter IDs, compartment IDs or species IDs. - # Thereof, all parameters except for any overridden ones should be made - # constant. 
- # (Could potentially still be made constant, but leaving them might - # increase model reusability) - - # handle parameters in condition table - condition_df = petab_problem.condition_df - if condition_df is not None: - logger.debug(f"Condition table: {condition_df.shape}") - - # remove overridden parameters (`object`-type columns) - fixed_parameters.update( - p - for p in condition_df.columns - # get rid of conditionName column - if p != CONDITION_NAME - # there is no parametric override - # TODO: could check if the final overriding parameter is estimated - # or not, but for now, we skip the parameter if there is any kind - # of overriding - if condition_df[p].dtype != "O" - # p is a parameter - and not petab_problem.model.is_state_variable(p) - ) - - # Ensure mentioned parameters exist in the model. Remove additional ones - # from list - for fixed_parameter in fixed_parameters.copy(): - # check global parameters - if not petab_problem.model.has_entity_with_id(fixed_parameter): - # TODO: could still exist as an output parameter? - logger.warning( - f"Column '{fixed_parameter}' used in condition " - "table but not entity with the corresponding ID " - "exists. Ignoring." - ) - fixed_parameters.remove(fixed_parameter) - - if petab_problem.model.type_id == MODEL_TYPE_SBML: - # exclude targets of rules or initial assignments - sbml_model = petab_problem.model.sbml_model - for fixed_parameter in fixed_parameters.copy(): - # check global parameters - if sbml_model.getInitialAssignmentBySymbol( - fixed_parameter - ) or sbml_model.getRuleByVariable(fixed_parameter): - fixed_parameters.remove(fixed_parameter) - - return list(sorted(fixed_parameters)) - - -def species_to_parameters( - species_ids: List[str], sbml_model: "libsbml.Model" -) -> List[str]: - """ - Turn a SBML species into parameters and replace species references - inside the model instance. - - :param species_ids: - List of SBML species ID to convert to parameters with the same ID as - the replaced species. 
- - :param sbml_model: - SBML model to modify - - :return: - List of IDs of species which have been converted to parameters - """ - transformables = [] - - for species_id in species_ids: - species = sbml_model.getSpecies(species_id) - - if species.getHasOnlySubstanceUnits(): - logger.warning( - f"Ignoring {species.getId()} which has only substance units." - " Conversion not yet implemented." - ) - continue - - if math.isnan(species.getInitialConcentration()): - logger.warning( - f"Ignoring {species.getId()} which has no initial " - "concentration. Amount conversion not yet implemented." - ) - continue - - transformables.append(species_id) - - # Must not remove species while iterating over getListOfSpecies() - for species_id in transformables: - species = sbml_model.removeSpecies(species_id) - par = sbml_model.createParameter() - par.setId(species.getId()) - par.setName(species.getName()) - par.setConstant(True) - par.setValue(species.getInitialConcentration()) - par.setUnits(species.getUnits()) - - # Remove from reactants and products - for reaction in sbml_model.getListOfReactions(): - for species_id in transformables: - # loop, since removeX only removes one instance - while reaction.removeReactant(species_id): - # remove from reactants - pass - while reaction.removeProduct(species_id): - # remove from products - pass - while reaction.removeModifier(species_id): - # remove from modifiers - pass - - return transformables - - -def import_petab_problem( - petab_problem: petab.Problem, - model_output_dir: Union[str, Path, None] = None, - model_name: str = None, - force_compile: bool = False, - non_estimated_parameters_as_constants=True, - **kwargs, -) -> "amici.Model": - """ - Import model from petab problem. - - :param petab_problem: - A petab problem containing all relevant information on the model. - - :param model_output_dir: - Directory to write the model code to. Will be created if doesn't - exist. Defaults to current directory. 
- - :param model_name: - Name of the generated model. If model file name was provided, - this defaults to the file name without extension, otherwise - the model ID will be used. - - :param force_compile: - Whether to compile the model even if the target folder is not empty, - or the model exists already. - - :param non_estimated_parameters_as_constants: - Whether parameters marked as non-estimated in PEtab should be - considered constant in AMICI. Setting this to ``True`` will reduce - model size and simulation times. If sensitivities with respect to those - parameters are required, this should be set to ``False``. - - :param kwargs: - Additional keyword arguments to be passed to - :meth:`amici.sbml_import.SbmlImporter.sbml2amici`. - - :return: - The imported model. - """ - if petab_problem.model.type_id not in (MODEL_TYPE_SBML, MODEL_TYPE_PYSB): - raise NotImplementedError( - "Unsupported model type " + petab_problem.model.type_id - ) - - if petab_problem.mapping_df is not None: - # It's partially supported. Remove at your own risk... - raise NotImplementedError( - "PEtab v2.0.0 mapping tables are not yet supported." 
- ) - - model_name = model_name or petab_problem.model.model_id - - if petab_problem.model.type_id == MODEL_TYPE_PYSB and model_name is None: - model_name = petab_problem.pysb_model.name - elif model_name is None and model_output_dir: - model_name = _create_model_name(model_output_dir) - - # generate folder and model name if necessary - if model_output_dir is None: - if petab_problem.model.type_id == MODEL_TYPE_PYSB: - raise ValueError("Parameter `model_output_dir` is required.") - - model_output_dir = _create_model_output_dir_name( - petab_problem.sbml_model, model_name - ) - else: - model_output_dir = os.path.abspath(model_output_dir) - - # create folder - if not os.path.exists(model_output_dir): - os.makedirs(model_output_dir) - - # check if compilation necessary - if force_compile or not _can_import_model(model_name, model_output_dir): - # check if folder exists - if os.listdir(model_output_dir) and not force_compile: - raise ValueError( - f"Cannot compile to {model_output_dir}: not empty. " - "Please assign a different target or set `force_compile`." - ) - - # remove folder if exists - if os.path.exists(model_output_dir): - shutil.rmtree(model_output_dir) - - logger.info(f"Compiling model {model_name} to {model_output_dir}.") - # compile the model - if petab_problem.model.type_id == MODEL_TYPE_PYSB: - import_model_pysb( - petab_problem, - model_name=model_name, - model_output_dir=model_output_dir, - **kwargs, - ) - else: - import_model_sbml( - petab_problem=petab_problem, - model_name=model_name, - model_output_dir=model_output_dir, - non_estimated_parameters_as_constants=non_estimated_parameters_as_constants, - **kwargs, - ) - - # import model - model_module = amici.import_model_module(model_name, model_output_dir) - model = model_module.getModel() - check_model(amici_model=model, petab_problem=petab_problem) - - logger.info( - f"Successfully loaded model {model_name} " f"from {model_output_dir}." 
- ) - - return model - - -def check_model( - amici_model: amici.Model, - petab_problem: petab.Problem, -) -> None: - """Check that the model is consistent with the PEtab problem.""" - if petab_problem.parameter_df is None: - return - - amici_ids_free = set(amici_model.getParameterIds()) - amici_ids = amici_ids_free | set(amici_model.getFixedParameterIds()) - - petab_ids_free = set( - petab_problem.parameter_df.loc[ - petab_problem.parameter_df[ESTIMATE] == 1 - ].index - ) - - amici_ids_free_required = petab_ids_free.intersection(amici_ids) - - if not amici_ids_free_required.issubset(amici_ids_free): - raise ValueError( - "The available AMICI model does not support estimating the " - "following parameters. Please recompile the model and ensure " - "that these parameters are not treated as constants. Deleting " - "the current model might also resolve this. Parameters: " - f"{amici_ids_free_required.difference(amici_ids_free)}" - ) - - -def _create_model_output_dir_name( - sbml_model: "libsbml.Model", model_name: Optional[str] = None -) -> Path: - """ - Find a folder for storing the compiled amici model. - If possible, use the sbml model id, otherwise create a random folder. - The folder will be located in the `amici_models` subfolder of the current - folder. - """ - BASE_DIR = Path("amici_models").absolute() - BASE_DIR.mkdir(exist_ok=True) - # try model_name - if model_name: - return BASE_DIR / model_name - - # try sbml model id - if sbml_model_id := sbml_model.getId(): - return BASE_DIR / sbml_model_id - - # create random folder name - return Path(tempfile.mkdtemp(dir=BASE_DIR)) - - -def _create_model_name(folder: Union[str, Path]) -> str: - """ - Create a name for the model. - Just re-use the last part of the folder. - """ - return os.path.split(os.path.normpath(folder))[-1] - - -def _can_import_model( - model_name: str, model_output_dir: Union[str, Path] -) -> bool: - """ - Check whether a module of that name can already be imported. 
- """ - # try to import (in particular checks version) - try: - with amici.add_path(model_output_dir): - model_module = importlib.import_module(model_name) - except ModuleNotFoundError: - return False - - # no need to (re-)compile - return hasattr(model_module, "getModel") - - -@log_execution_time("Importing PEtab model", logger) -def import_model_sbml( - sbml_model: Union[str, Path, "libsbml.Model"] = None, - condition_table: Optional[Union[str, Path, pd.DataFrame]] = None, - observable_table: Optional[Union[str, Path, pd.DataFrame]] = None, - measurement_table: Optional[Union[str, Path, pd.DataFrame]] = None, - petab_problem: petab.Problem = None, - model_name: Optional[str] = None, - model_output_dir: Optional[Union[str, Path]] = None, - verbose: Optional[Union[bool, int]] = True, - allow_reinit_fixpar_initcond: bool = True, - validate: bool = True, - non_estimated_parameters_as_constants=True, - output_parameter_defaults: Optional[Dict[str, float]] = None, - discard_sbml_annotations: bool = False, - **kwargs, -) -> amici.SbmlImporter: - """ - Create AMICI model from PEtab problem - - :param sbml_model: - PEtab SBML model or SBML file name. - Deprecated, pass ``petab_problem`` instead. - - :param condition_table: - PEtab condition table. If provided, parameters from there will be - turned into AMICI constant parameters (i.e. parameters w.r.t. which - no sensitivities will be computed). - Deprecated, pass ``petab_problem`` instead. - - :param observable_table: - PEtab observable table. Deprecated, pass ``petab_problem`` instead. - - :param measurement_table: - PEtab measurement table. Deprecated, pass ``petab_problem`` instead. - - :param petab_problem: - PEtab problem. - :param model_name: - Name of the generated model. If model file name was provided, - this defaults to the file name without extension, otherwise - the SBML model ID will be used. - - :param model_output_dir: - Directory to write the model code to. Will be created if doesn't - exist. 
Defaults to current directory. - - :param verbose: - Print/log extra information. - - :param allow_reinit_fixpar_initcond: - See :class:`amici.de_export.ODEExporter`. Must be enabled if initial - states are to be reset after preequilibration. - - :param validate: - Whether to validate the PEtab problem - - :param non_estimated_parameters_as_constants: - Whether parameters marked as non-estimated in PEtab should be - considered constant in AMICI. Setting this to ``True`` will reduce - model size and simulation times. If sensitivities with respect to those - parameters are required, this should be set to ``False``. - - :param output_parameter_defaults: - Optional default parameter values for output parameters introduced in - the PEtab observables table, in particular for placeholder parameters. - Dictionary mapping parameter IDs to default values. - - :param discard_sbml_annotations: - Discard information contained in AMICI SBML annotations (debug). - - :param kwargs: - Additional keyword arguments to be passed to - :meth:`amici.sbml_import.SbmlImporter.sbml2amici`. - - :return: - The created :class:`amici.sbml_import.SbmlImporter` instance. - """ - from petab.models.sbml_model import SbmlModel - - set_log_level(logger, verbose) - - logger.info("Importing model ...") - - if any([sbml_model, condition_table, observable_table, measurement_table]): - warn( - "The `sbml_model`, `condition_table`, `observable_table`, and " - "`measurement_table` arguments are deprecated and will be " - "removed in a future version. Use `petab_problem` instead.", - DeprecationWarning, - stacklevel=2, - ) - if petab_problem: - raise ValueError( - "Must not pass a `petab_problem` argument in " - "combination with any of `sbml_model`, " - "`condition_table`, `observable_table`, or " - "`measurement_table`." 
- ) - - petab_problem = petab.Problem( - model=SbmlModel(sbml_model) - if isinstance(sbml_model, libsbml.Model) - else SbmlModel.from_file(sbml_model), - condition_df=petab.get_condition_df(condition_table), - observable_df=petab.get_observable_df(observable_table), - ) - - if petab_problem.observable_df is None: - raise NotImplementedError( - "PEtab import without observables table " - "is currently not supported." - ) - - assert isinstance(petab_problem.model, SbmlModel) - - if validate: - logger.info("Validating PEtab problem ...") - petab.lint_problem(petab_problem) - - # Model name from SBML ID or filename - if model_name is None: - if not (model_name := petab_problem.model.sbml_model.getId()): - if not isinstance(sbml_model, (str, Path)): - raise ValueError( - "No `model_name` was provided and no model " - "ID was specified in the SBML model." - ) - model_name = os.path.splitext(os.path.split(sbml_model)[-1])[0] - - if model_output_dir is None: - model_output_dir = os.path.join( - os.getcwd(), f"{model_name}-amici{amici.__version__}" - ) - - logger.info( - f"Model name is '{model_name}'.\n" - f"Writing model code to '{model_output_dir}'." - ) - - # Create a copy, because it will be modified by SbmlImporter - sbml_doc = petab_problem.model.sbml_model.getSBMLDocument().clone() - sbml_model = sbml_doc.getModel() - - show_model_info(sbml_model) - - sbml_importer = amici.SbmlImporter( - sbml_model, - discard_annotations=discard_sbml_annotations, - ) - sbml_model = sbml_importer.sbml - - allow_n_noise_pars = ( - not petab.lint.observable_table_has_nontrivial_noise_formula( - petab_problem.observable_df - ) - ) - if ( - petab_problem.measurement_df is not None - and petab.lint.measurement_table_has_timepoint_specific_mappings( - petab_problem.measurement_df, - allow_scalar_numeric_noise_parameters=allow_n_noise_pars, - ) - ): - raise ValueError( - "AMICI does not support importing models with timepoint specific " - "mappings for noise or observable parameters. 
Please flatten " - "the problem and try again." - ) - - if petab_problem.observable_df is not None: - observables, noise_distrs, sigmas = get_observation_model( - petab_problem.observable_df - ) - else: - observables = noise_distrs = sigmas = None - - logger.info(f"Observables: {len(observables)}") - logger.info(f"Sigmas: {len(sigmas)}") - - if len(sigmas) != len(observables): - raise AssertionError( - f"Number of provided observables ({len(observables)}) and sigmas " - f"({len(sigmas)}) do not match." - ) - - # TODO: adding extra output parameters is currently not supported, - # so we add any output parameters to the SBML model. - # this should be changed to something more elegant - # - formulas = chain( - (val["formula"] for val in observables.values()), sigmas.values() - ) - output_parameters = OrderedDict() - for formula in formulas: - # we want reproducible parameter ordering upon repeated import - free_syms = sorted( - sp.sympify(formula, locals=_clash).free_symbols, - key=lambda symbol: symbol.name, - ) - for free_sym in free_syms: - sym = str(free_sym) - if ( - sbml_model.getElementBySId(sym) is None - and sym != "time" - and sym not in observables - ): - output_parameters[sym] = None - logger.debug( - "Adding output parameters to model: " - f"{list(output_parameters.keys())}" - ) - output_parameter_defaults = output_parameter_defaults or {} - if extra_pars := ( - set(output_parameter_defaults) - set(output_parameters.keys()) - ): - raise ValueError( - f"Default output parameter values were given for {extra_pars}, " - "but they those are not output parameters." - ) - - for par in output_parameters.keys(): - _add_global_parameter( - sbml_model=sbml_model, - parameter_id=par, - value=output_parameter_defaults.get(par, 0.0), - ) - # - - # TODO: to parameterize initial states or compartment sizes, we currently - # need initial assignments. if they occur in the condition table, we - # create a new parameter initial_${speciesOrCompartmentID}. 
- # feels dirty and should be changed (see also #924) - # - - initial_states = get_states_in_condition_table(petab_problem) - fixed_parameters = [] - if initial_states: - # add preequilibration indicator variable - # NOTE: would only be required if we actually have preequilibration - # adding it anyways. can be optimized-out later - if sbml_model.getParameter(PREEQ_INDICATOR_ID) is not None: - raise AssertionError( - "Model already has a parameter with ID " - f"{PREEQ_INDICATOR_ID}. Cannot handle " - "species and compartments in condition table " - "then." - ) - indicator = sbml_model.createParameter() - indicator.setId(PREEQ_INDICATOR_ID) - indicator.setName(PREEQ_INDICATOR_ID) - # Can only reset parameters after preequilibration if they are fixed. - fixed_parameters.append(PREEQ_INDICATOR_ID) - logger.debug( - "Adding preequilibration indicator " - f"constant {PREEQ_INDICATOR_ID}" - ) - logger.debug(f"Adding initial assignments for {initial_states.keys()}") - for assignee_id in initial_states: - init_par_id_preeq = f"initial_{assignee_id}_preeq" - init_par_id_sim = f"initial_{assignee_id}_sim" - for init_par_id in [init_par_id_preeq, init_par_id_sim]: - if sbml_model.getElementBySId(init_par_id) is not None: - raise ValueError( - "Cannot create parameter for initial assignment " - f"for {assignee_id} because an entity named " - f"{init_par_id} exists already in the model." - ) - init_par = sbml_model.createParameter() - init_par.setId(init_par_id) - init_par.setName(init_par_id) - assignment = sbml_model.getInitialAssignment(assignee_id) - if assignment is None: - assignment = sbml_model.createInitialAssignment() - assignment.setSymbol(assignee_id) - else: - logger.debug( - "The SBML model has an initial assignment defined " - f"for model entity {assignee_id}, but this entity " - "also has an initial value defined in the PEtab " - "condition table. 
The SBML initial assignment will " - "be overwritten to handle preequilibration and " - "initial values specified by the PEtab problem." - ) - formula = ( - f"{PREEQ_INDICATOR_ID} * {init_par_id_preeq} " - f"+ (1 - {PREEQ_INDICATOR_ID}) * {init_par_id_sim}" - ) - math_ast = libsbml.parseL3Formula(formula) - assignment.setMath(math_ast) - # - - fixed_parameters.extend( - get_fixed_parameters( - petab_problem=petab_problem, - non_estimated_parameters_as_constants=non_estimated_parameters_as_constants, - ) - ) - - logger.debug(f"Fixed parameters are {fixed_parameters}") - logger.info(f"Overall fixed parameters: {len(fixed_parameters)}") - logger.info( - "Variable parameters: " - + str(len(sbml_model.getListOfParameters()) - len(fixed_parameters)) - ) - - # Create Python module from SBML model - sbml_importer.sbml2amici( - model_name=model_name, - output_dir=model_output_dir, - observables=observables, - constant_parameters=fixed_parameters, - sigmas=sigmas, - allow_reinit_fixpar_initcond=allow_reinit_fixpar_initcond, - noise_distributions=noise_distrs, - verbose=verbose, - **kwargs, - ) - - if kwargs.get( - "compile", - amici._get_default_argument(sbml_importer.sbml2amici, "compile"), - ): - # check that the model extension was compiled successfully - model_module = amici.import_model_module(model_name, model_output_dir) - model = model_module.getModel() - check_model(amici_model=model, petab_problem=petab_problem) - - return sbml_importer - - -# for backwards compatibility -import_model = import_model_sbml - - -def get_observation_model( - observable_df: pd.DataFrame, -) -> Tuple[ - Dict[str, Dict[str, str]], Dict[str, str], Dict[str, Union[str, float]] -]: - """ - Get observables, sigmas, and noise distributions from PEtab observation - table in a format suitable for - :meth:`amici.sbml_import.SbmlImporter.sbml2amici`. - - :param observable_df: - PEtab observables table - - :return: - Tuple of dicts with observables, noise distributions, and sigmas. 
- """ - if observable_df is None: - return {}, {}, {} - - observables = {} - sigmas = {} - - nan_pat = r"^[nN]a[nN]$" - for _, observable in observable_df.iterrows(): - oid = str(observable.name) - # need to sanitize due to https://github.com/PEtab-dev/PEtab/issues/447 - name = re.sub(nan_pat, "", str(observable.get(OBSERVABLE_NAME, ""))) - formula_obs = re.sub(nan_pat, "", str(observable[OBSERVABLE_FORMULA])) - formula_noise = re.sub(nan_pat, "", str(observable[NOISE_FORMULA])) - observables[oid] = {"name": name, "formula": formula_obs} - sigmas[oid] = formula_noise - - # PEtab does currently not allow observables in noiseFormula and AMICI - # cannot handle states in sigma expressions. Therefore, where possible, - # replace species occurring in error model definition by observableIds. - replacements = { - sp.sympify(observable["formula"], locals=_clash): sp.Symbol( - observable_id - ) - for observable_id, observable in observables.items() - } - for observable_id, formula in sigmas.items(): - repl = sp.sympify(formula, locals=_clash).subs(replacements) - sigmas[observable_id] = str(repl) - - noise_distrs = petab_noise_distributions_to_amici(observable_df) - - return observables, noise_distrs, sigmas - - -def petab_noise_distributions_to_amici( - observable_df: pd.DataFrame, -) -> Dict[str, str]: - """ - Map from the petab to the amici format of noise distribution - identifiers. 
- - :param observable_df: - PEtab observable table - - :return: - Dictionary of observable_id => AMICI noise-distributions - """ - amici_distrs = {} - for _, observable in observable_df.iterrows(): - amici_val = "" - - if ( - OBSERVABLE_TRANSFORMATION in observable - and isinstance(observable[OBSERVABLE_TRANSFORMATION], str) - and observable[OBSERVABLE_TRANSFORMATION] - ): - amici_val += observable[OBSERVABLE_TRANSFORMATION] + "-" - - if ( - NOISE_DISTRIBUTION in observable - and isinstance(observable[NOISE_DISTRIBUTION], str) - and observable[NOISE_DISTRIBUTION] - ): - amici_val += observable[NOISE_DISTRIBUTION] - else: - amici_val += "normal" - amici_distrs[observable.name] = amici_val - - return amici_distrs - - -def petab_scale_to_amici_scale(scale_str: str) -> int: - """Convert PEtab parameter scaling string to AMICI scaling integer""" - - if scale_str == petab.LIN: - return amici.ParameterScaling_none - if scale_str == petab.LOG: - return amici.ParameterScaling_ln - if scale_str == petab.LOG10: - return amici.ParameterScaling_log10 - - raise ValueError(f"Invalid parameter scale {scale_str}") - - -def show_model_info(sbml_model: "libsbml.Model"): - """Log some model quantities""" - - logger.info(f"Species: {len(sbml_model.getListOfSpecies())}") - logger.info( - "Global parameters: " + str(len(sbml_model.getListOfParameters())) - ) - logger.info(f"Reactions: {len(sbml_model.getListOfReactions())}") - - -def _parse_cli_args(): - """ - Parse command line arguments - - :return: - Parsed CLI arguments from :mod:`argparse`. - """ - parser = argparse.ArgumentParser( - description="Import PEtab-format model into AMICI." 
- ) - - # General options: - parser.add_argument( - "-v", - "--verbose", - dest="verbose", - action="store_true", - help="More verbose output", - ) - parser.add_argument( - "-o", - "--output-dir", - dest="model_output_dir", - help="Name of the model directory to create", - ) - parser.add_argument( - "--no-compile", - action="store_false", - dest="compile", - help="Only generate model code, do not compile", - ) - parser.add_argument( - "--no-validate", - action="store_false", - dest="validate", - help="Skip validation of PEtab files", - ) - parser.add_argument( - "--flatten", - dest="flatten", - default=False, - action="store_true", - help="Flatten measurement specific overrides of " - "observable and noise parameters", - ) - parser.add_argument( - "--no-sensitivities", - dest="generate_sensitivity_code", - default=True, - action="store_false", - help="Skip generation of sensitivity code", - ) - - # Call with set of files - parser.add_argument( - "-s", "--sbml", dest="sbml_file_name", help="SBML model filename" - ) - parser.add_argument( - "-m", - "--measurements", - dest="measurement_file_name", - help="Measurement table", - ) - parser.add_argument( - "-c", - "--conditions", - dest="condition_file_name", - help="Conditions table", - ) - parser.add_argument( - "-p", - "--parameters", - dest="parameter_file_name", - help="Parameter table", - ) - parser.add_argument( - "-b", - "--observables", - dest="observable_file_name", - help="Observable table", - ) - - parser.add_argument( - "-y", - "--yaml", - dest="yaml_file_name", - help="PEtab YAML problem filename", - ) - - parser.add_argument( - "-n", - "--model-name", - dest="model_name", - help="Name of the python module generated for the " "model", - ) - - args = parser.parse_args() - - if not args.yaml_file_name and not all( - ( - args.sbml_file_name, - args.condition_file_name, - args.observable_file_name, - ) - ): - parser.error( - "When not specifying a model name or YAML file, then " - "SBML, condition and 
observable file must be specified" - ) - - return args - - -def _main(): - """ - Command line interface to import a model in the PEtab - (https://github.com/PEtab-dev/PEtab/) format into AMICI. - """ - args = _parse_cli_args() - - if args.yaml_file_name: - pp = petab.Problem.from_yaml(args.yaml_file_name) - else: - pp = petab.Problem.from_files( - sbml_file=args.sbml_file_name, - condition_file=args.condition_file_name, - measurement_file=args.measurement_file_name, - parameter_file=args.parameter_file_name, - observable_files=args.observable_file_name, - ) - - # Check for valid PEtab before potentially modifying it - if args.validate: - petab.lint_problem(pp) - - if args.flatten: - petab.flatten_timepoint_specific_output_overrides(pp) - - import_model( - model_name=args.model_name, - sbml_model=pp.sbml_model, - condition_table=pp.condition_df, - observable_table=pp.observable_df, - measurement_table=pp.measurement_df, - model_output_dir=args.model_output_dir, - compile=args.compile, - generate_sensitivity_code=args.generate_sensitivity_code, - verbose=args.verbose, - validate=False, - ) - - -if __name__ == "__main__": - _main() +.. deprecated:: 0.21.0 + Use :mod:`amici.petab` instead. +""" +import warnings + +warnings.warn( + "Importing amici.petab_import is deprecated. 
Use `amici.petab` instead.", + DeprecationWarning, +) + +from .petab.import_helpers import ( # noqa # pylint: disable=unused-import + get_observation_model, + petab_noise_distributions_to_amici, + petab_scale_to_amici_scale, +) + +# DEPRECATED - DON'T ADD ANYTHING NEW HERE +from .petab.petab_import import ( # noqa # pylint: disable=unused-import + check_model, + import_model, + import_model_sbml, + import_petab_problem, +) +from .petab.sbml_import import ( # noqa + _get_fixed_parameters_sbml as get_fixed_parameters, +) +from .petab.sbml_import import species_to_parameters # noqa + +__all__ = [ + "get_observation_model", + "petab_noise_distributions_to_amici", + "petab_scale_to_amici_scale", + "check_model", + "import_model", + "import_model_sbml", + "import_petab_problem", + "get_fixed_parameters", + "species_to_parameters", +] diff --git a/python/sdist/amici/petab_import_pysb.py b/python/sdist/amici/petab_import_pysb.py index 8036d1358d..595018f208 100644 --- a/python/sdist/amici/petab_import_pysb.py +++ b/python/sdist/amici/petab_import_pysb.py @@ -1,274 +1,20 @@ """ -PySB-PEtab Import ------------------ -Import a model in the PySB-adapted :mod:`petab` -(https://github.com/PEtab-dev/PEtab) format into AMICI. 
-""" - -import logging -import re -from pathlib import Path -from typing import Optional, Union - -import petab -import pysb -import pysb.bng -import sympy as sp -from petab.C import CONDITION_NAME, NOISE_FORMULA, OBSERVABLE_FORMULA -from petab.models.pysb_model import PySBModel - -from .logging import get_logger, log_execution_time, set_log_level -from .petab_util import PREEQ_INDICATOR_ID, get_states_in_condition_table - -logger = get_logger(__name__, logging.WARNING) - - -def _add_observation_model( - pysb_model: pysb.Model, petab_problem: petab.Problem -): - """Extend PySB model by observation model as defined in the PEtab - observables table""" - - # add any required output parameters - local_syms = { - sp.Symbol.__str__(comp): comp - for comp in pysb_model.components - if isinstance(comp, sp.Symbol) - } - for formula in [ - *petab_problem.observable_df[OBSERVABLE_FORMULA], - *petab_problem.observable_df[NOISE_FORMULA], - ]: - sym = sp.sympify(formula, locals=local_syms) - for s in sym.free_symbols: - if not isinstance(s, pysb.Component): - p = pysb.Parameter(str(s), 1.0) - pysb_model.add_component(p) - local_syms[sp.Symbol.__str__(p)] = p - - # add observables and sigmas to pysb model - for observable_id, observable_formula, noise_formula in zip( - petab_problem.observable_df.index, - petab_problem.observable_df[OBSERVABLE_FORMULA], - petab_problem.observable_df[NOISE_FORMULA], - ): - obs_symbol = sp.sympify(observable_formula, locals=local_syms) - if observable_id in pysb_model.expressions.keys(): - obs_expr = pysb_model.expressions[observable_id] - else: - obs_expr = pysb.Expression(observable_id, obs_symbol) - pysb_model.add_component(obs_expr) - local_syms[observable_id] = obs_expr - - sigma_id = f"{observable_id}_sigma" - sigma_symbol = sp.sympify(noise_formula, locals=local_syms) - sigma_expr = pysb.Expression(sigma_id, sigma_symbol) - pysb_model.add_component(sigma_expr) - local_syms[sigma_id] = sigma_expr - - -def _add_initialization_variables( - 
pysb_model: pysb.Model, petab_problem: petab.Problem -): - """Add initialization variables to the PySB model to support initial - conditions specified in the PEtab condition table. - - To parameterize initial states, we currently need initial assignments. - If they occur in the condition table, we create a new parameter - initial_${speciesID}. Feels dirty and should be changed (see also #924). - """ - - initial_states = get_states_in_condition_table(petab_problem) - fixed_parameters = [] - if initial_states: - # add preequilibration indicator variable - # NOTE: would only be required if we actually have preequilibration - # adding it anyways. can be optimized-out later - if PREEQ_INDICATOR_ID in [c.name for c in pysb_model.components]: - raise AssertionError( - "Model already has a component with ID " - f"{PREEQ_INDICATOR_ID}. Cannot handle " - "species and compartments in condition table " - "then." - ) - preeq_indicator = pysb.Parameter(PREEQ_INDICATOR_ID) - pysb_model.add_component(preeq_indicator) - # Can only reset parameters after preequilibration if they are fixed. - fixed_parameters.append(PREEQ_INDICATOR_ID) - logger.debug( - "Adding preequilibration indicator constant " - f"{PREEQ_INDICATOR_ID}" - ) - logger.debug(f"Adding initial assignments for {initial_states.keys()}") - - for assignee_id in initial_states: - init_par_id_preeq = f"initial_{assignee_id}_preeq" - init_par_id_sim = f"initial_{assignee_id}_sim" - for init_par_id in [init_par_id_preeq, init_par_id_sim]: - if init_par_id in [c.name for c in pysb_model.components]: - raise ValueError( - "Cannot create parameter for initial assignment " - f"for {assignee_id} because an entity named " - f"{init_par_id} exists already in the model." 
- ) - p = pysb.Parameter(init_par_id) - pysb_model.add_component(p) - - species_idx = int(re.match(r"__s(\d+)$", assignee_id)[1]) - # use original model here since that's what was used to generate - # the ids in initial_states - species_pattern = petab_problem.model.model.species[species_idx] - - # species pattern comes from the _original_ model, but we only want - # to modify pysb_model, so we have to reconstitute the pattern using - # pysb_model - for c in pysb_model.components: - globals()[c.name] = c - species_pattern = pysb.as_complex_pattern(eval(str(species_pattern))) - - from pysb.pattern import match_complex_pattern - - formula = pysb.Expression( - f"initial_{assignee_id}_formula", - preeq_indicator * pysb_model.parameters[init_par_id_preeq] - + (1 - preeq_indicator) * pysb_model.parameters[init_par_id_sim], - ) - pysb_model.add_component(formula) - - for initial in pysb_model.initials: - if match_complex_pattern( - initial.pattern, species_pattern, exact=True - ): - logger.debug( - "The PySB model has an initial defined for species " - f"{assignee_id}, but this species also has an initial " - "value defined in the PEtab condition table. The SBML " - "initial assignment will be overwritten to handle " - "preequilibration and initial values specified by the " - "PEtab problem." - ) - initial.value = formula - break - else: - # No initial in the pysb model, so add one - init = pysb.Initial(species_pattern, formula) - pysb_model.add_component(init) +PEtab import for PySB models - return fixed_parameters - - -@log_execution_time("Importing PEtab model", logger) -def import_model_pysb( - petab_problem: petab.Problem, - model_output_dir: Optional[Union[str, Path]] = None, - verbose: Optional[Union[bool, int]] = True, - model_name: Optional[str] = None, - **kwargs, -) -> None: - """ - Create AMICI model from PySB-PEtab problem - - :param petab_problem: - PySB PEtab problem - - :param model_output_dir: - Directory to write the model code to. 
Will be created if doesn't - exist. Defaults to current directory. - - :param verbose: - Print/log extra information. - - :param model_name: - Name of the generated model module - - :param kwargs: - Additional keyword arguments to be passed to - :meth:`amici.pysb_import.pysb2amici`. - """ - set_log_level(logger, verbose) - - logger.info("Importing model ...") - - if not isinstance(petab_problem.model, PySBModel): - raise ValueError("Not a PySB model") - - # need to create a copy here as we don't want to modify the original - pysb.SelfExporter.cleanup() - og_export = pysb.SelfExporter.do_export - pysb.SelfExporter.do_export = False - pysb_model = pysb.Model( - base=petab_problem.model.model, - name=petab_problem.model.model_id, - ) - - _add_observation_model(pysb_model, petab_problem) - # generate species for the _original_ model - pysb.bng.generate_equations(petab_problem.model.model) - fixed_parameters = _add_initialization_variables(pysb_model, petab_problem) - pysb.SelfExporter.do_export = og_export - - # check condition table for supported features, important to use pysb_model - # here, as we want to also cover output parameters - model_parameters = [p.name for p in pysb_model.parameters] - condition_species_parameters = get_states_in_condition_table( - petab_problem, return_patterns=True - ) - for x in petab_problem.condition_df.columns: - if x == CONDITION_NAME: - continue - - x = petab.mapping.resolve_mapping(petab_problem.mapping_df, x) - - # parameters - if x in model_parameters: - continue - - # species/pattern - if x in condition_species_parameters: - continue - - raise NotImplementedError( - "For PySB PEtab import, only model parameters and species, but " - "not compartments are allowed in the condition table. 
Offending " - f"column: {x}" - ) - - from .petab_import import ( - get_fixed_parameters, - petab_noise_distributions_to_amici, - ) - - constant_parameters = ( - get_fixed_parameters(petab_problem) + fixed_parameters - ) - - if petab_problem.observable_df is None: - observables = None - sigmas = None - noise_distrs = None - else: - observables = [ - expr.name - for expr in pysb_model.expressions - if expr.name in petab_problem.observable_df.index - ] +.. deprecated:: 0.21.0 + Use :mod:`amici.petab.pysb_import` instead. +""" +import warnings - sigmas = {obs_id: f"{obs_id}_sigma" for obs_id in observables} +from .petab.pysb_import import * # noqa: F401, F403 - noise_distrs = petab_noise_distributions_to_amici( - petab_problem.observable_df - ) +# DEPRECATED - DON'T ADD ANYTHING NEW HERE - from amici.pysb_import import pysb2amici +warnings.warn( + "Importing amici.petab_import_pysb is deprecated. Use `amici.petab.pysb_import` instead.", + DeprecationWarning, +) - pysb2amici( - model=pysb_model, - output_dir=model_output_dir, - model_name=model_name, - verbose=True, - observables=observables, - sigmas=sigmas, - constant_parameters=constant_parameters, - noise_distributions=noise_distrs, - **kwargs, - ) +__all__ = [ + "import_model_pysb", +] diff --git a/python/sdist/amici/petab_objective.py b/python/sdist/amici/petab_objective.py index e3111d3b68..01724b7a7d 100644 --- a/python/sdist/amici/petab_objective.py +++ b/python/sdist/amici/petab_objective.py @@ -1,1184 +1,53 @@ """ -PEtab Objective ---------------- -Functionality related to running simulations or evaluating the objective -function as defined by a PEtab problem +Evaluate a PEtab objective function. + +.. deprecated:: 0.21.0 + Use :mod:`amici.petab.simulations` instead. """ -import copy -import logging -import numbers -import re -from typing import ( - Any, - Collection, - Dict, - Iterator, - List, - Optional, - Sequence, - Tuple, - Union, -) +# THIS FILE IS TO BE REMOVED - DON'T ADD ANYTHING HERE! 
-import amici -import libsbml -import numpy as np -import pandas as pd -import petab -import sympy as sp -from amici.sbml_import import get_species_initial -from petab.C import * # noqa: F403 -from petab.models import MODEL_TYPE_PYSB, MODEL_TYPE_SBML -from sympy.abc import _clash +import warnings -from . import AmiciExpData, AmiciModel -from .logging import get_logger, log_execution_time -from .parameter_mapping import ( - ParameterMapping, - ParameterMappingForCondition, - fill_in_parameters, +warnings.warn( + f"Importing {__name__} is deprecated. Use `amici.petab.simulations` instead.", + DeprecationWarning, ) -from .petab_import import PREEQ_INDICATOR_ID -from .petab_util import get_states_in_condition_table - -try: - import pysb -except ImportError: - pysb = None - -logger = get_logger(__name__) - - -# string constant definitions -LLH = "llh" -SLLH = "sllh" -FIM = "fim" -S2LLH = "s2llh" -RES = "res" -SRES = "sres" -RDATAS = "rdatas" -EDATAS = "edatas" - - -@log_execution_time("Simulating PEtab model", logger) -def simulate_petab( - petab_problem: petab.Problem, - amici_model: AmiciModel, - solver: Optional[amici.Solver] = None, - problem_parameters: Optional[Dict[str, float]] = None, - simulation_conditions: Union[pd.DataFrame, Dict] = None, - edatas: List[AmiciExpData] = None, - parameter_mapping: ParameterMapping = None, - scaled_parameters: Optional[bool] = False, - log_level: int = logging.WARNING, - num_threads: int = 1, - failfast: bool = True, - scaled_gradients: bool = False, -) -> Dict[str, Any]: - """Simulate PEtab model. - - .. note:: - Regardless of `scaled_parameters`, unscaled sensitivities are returned, - unless `scaled_gradients=True`. - - :param petab_problem: - PEtab problem to work on. - :param amici_model: - AMICI Model assumed to be compatible with ``petab_problem``. - :param solver: - An AMICI solver. Will use default options if None. - :param problem_parameters: - Run simulation with these parameters. 
If ``None``, PEtab - ``nominalValues`` will be used. To be provided as dict, mapping PEtab - problem parameters to SBML IDs. - :param simulation_conditions: - Result of :py:func:`petab.get_simulation_conditions`. Can be provided - to save time if this has be obtained before. - Not required if ``edatas`` and ``parameter_mapping`` are provided. - :param edatas: - Experimental data. Parameters are inserted in-place for simulation. - :param parameter_mapping: - Optional precomputed PEtab parameter mapping for efficiency, as - generated by :py:func:`create_parameter_mapping`. - :param scaled_parameters: - If ``True``, ``problem_parameters`` are assumed to be on the scale - provided in the PEtab parameter table and will be unscaled. - If ``False``, they are assumed to be in linear scale. - :param log_level: - Log level, see :mod:`amici.logging` module. - :param num_threads: - Number of threads to use for simulating multiple conditions - (only used if compiled with OpenMP). - :param failfast: - Returns as soon as an integration failure is encountered, skipping - any remaining simulations. - :param scaled_gradients: - Whether to compute gradients on parameter scale (``True``) or not - (``False``). - - :return: - Dictionary of - - * cost function value (``LLH``), - * list of :class:`amici.amici.ReturnData` (``RDATAS``), - * list of :class:`amici.amici.ExpData` (``EDATAS``), - - corresponding to the different simulation conditions. - For ordering of simulation conditions, see - :meth:`petab.Problem.get_simulation_conditions_from_measurement_df`. - """ - logger.setLevel(log_level) - - if solver is None: - solver = amici_model.getSolver() - - # Switch to scaled parameters. 
- problem_parameters = _default_scaled_parameters( - petab_problem=petab_problem, - problem_parameters=problem_parameters, - scaled_parameters=scaled_parameters, - ) - scaled_parameters = True - - # number of amici simulations will be number of unique - # (preequilibrationConditionId, simulationConditionId) pairs. - # Can be optimized by checking for identical condition vectors. - if ( - simulation_conditions is None - and parameter_mapping is None - and edatas is None - ): - simulation_conditions = ( - petab_problem.get_simulation_conditions_from_measurement_df() - ) - - # Get parameter mapping - if parameter_mapping is None: - parameter_mapping = create_parameter_mapping( - petab_problem=petab_problem, - simulation_conditions=simulation_conditions, - scaled_parameters=scaled_parameters, - amici_model=amici_model, - ) - - # Get edatas - if edatas is None: - # Generate ExpData with all condition-specific information - edatas = create_edatas( - amici_model=amici_model, - petab_problem=petab_problem, - simulation_conditions=simulation_conditions, - ) - - # Fill parameters in ExpDatas (in-place) - fill_in_parameters( - edatas=edatas, - problem_parameters=problem_parameters, - scaled_parameters=scaled_parameters, - parameter_mapping=parameter_mapping, - amici_model=amici_model, - ) - - # Simulate - rdatas = amici.runAmiciSimulations( - amici_model, - solver, - edata_list=edatas, - num_threads=num_threads, - failfast=failfast, - ) - - # Compute total llh - llh = sum(rdata["llh"] for rdata in rdatas) - # Compute total sllh - sllh = None - if solver.getSensitivityOrder() != amici.SensitivityOrder.none: - sllh = aggregate_sllh( - amici_model=amici_model, - rdatas=rdatas, - parameter_mapping=parameter_mapping, - petab_scale=scaled_parameters, - petab_problem=petab_problem, - edatas=edatas, - ) - if not scaled_gradients and sllh is not None: - sllh = { - parameter_id: rescale_sensitivity( - sensitivity=sensitivity, - parameter_value=problem_parameters[parameter_id], - 
old_scale=petab_problem.parameter_df.loc[ - parameter_id, PARAMETER_SCALE - ], - new_scale=LIN, - ) - for parameter_id, sensitivity in sllh.items() - } - - # Log results - sim_cond = petab_problem.get_simulation_conditions_from_measurement_df() - for i, rdata in enumerate(rdatas): - sim_cond_id = "N/A" if sim_cond.empty else sim_cond.iloc[i, :].values - logger.debug( - f"Condition: {sim_cond_id}, status: {rdata['status']}, " - f"llh: {rdata['llh']}" - ) - - return { - LLH: llh, - SLLH: sllh, - RDATAS: rdatas, - EDATAS: edatas, - } - - -def aggregate_sllh( - amici_model: AmiciModel, - rdatas: Sequence[amici.ReturnDataView], - parameter_mapping: Optional[ParameterMapping], - edatas: List[AmiciExpData], - petab_scale: bool = True, - petab_problem: petab.Problem = None, -) -> Union[None, Dict[str, float]]: - """ - Aggregate likelihood gradient for all conditions, according to PEtab - parameter mapping. - - :param amici_model: - AMICI model from which ``rdatas`` were obtained. - :param rdatas: - Simulation results. - :param parameter_mapping: - PEtab parameter mapping to condition-specific simulation parameters. - :param edatas: - Experimental data used for simulation. - :param petab_scale: - Whether to check that sensitivities were computed with parameters on - the scales provided in the PEtab parameters table. - :param petab_problem: - The PEtab problem that defines the parameter scales. - - :return: - Aggregated likelihood sensitivities. - """ - accumulated_sllh = {} - model_parameter_ids = amici_model.getParameterIds() - - if petab_scale and petab_problem is None: - raise ValueError( - "Please provide the PEtab problem, when using " - "`petab_scale=True`." - ) - - # Check for issues in all condition simulation results. - for rdata in rdatas: - # Condition failed during simulation. - if rdata.status != amici.AMICI_SUCCESS: - return None - # Condition simulation result does not provide SLLH. 
- if rdata.sllh is None: - raise ValueError( - "The sensitivities of the likelihood for a condition were " - "not computed." - ) - - for condition_parameter_mapping, edata, rdata in zip( - parameter_mapping, edatas, rdatas - ): - for sllh_parameter_index, condition_parameter_sllh in enumerate( - rdata.sllh - ): - # Get PEtab parameter ID - # Use ExpData if it provides a parameter list, else default to - # Model. - if edata.plist: - model_parameter_index = edata.plist[sllh_parameter_index] - else: - model_parameter_index = amici_model.plist(sllh_parameter_index) - model_parameter_id = model_parameter_ids[model_parameter_index] - petab_parameter_id = condition_parameter_mapping.map_sim_var[ - model_parameter_id - ] - - # Initialize - if petab_parameter_id not in accumulated_sllh: - accumulated_sllh[petab_parameter_id] = 0 - - # Check that the scale is consistent - if petab_scale: - # `ParameterMappingForCondition` objects provide the scale in - # terms of `petab.C` constants already, not AMICI equivalents. - model_parameter_scale = ( - condition_parameter_mapping.scale_map_sim_var[ - model_parameter_id - ] - ) - petab_parameter_scale = petab_problem.parameter_df.loc[ - petab_parameter_id, PARAMETER_SCALE - ] - if model_parameter_scale != petab_parameter_scale: - raise ValueError( - f"The scale of the parameter `{petab_parameter_id}` " - "differs between the AMICI model " - f"({model_parameter_scale}) and the PEtab problem " - f"({petab_parameter_scale})." - ) - - # Accumulate - accumulated_sllh[petab_parameter_id] += condition_parameter_sllh - - return accumulated_sllh - - -def rescale_sensitivity( - sensitivity: float, - parameter_value: float, - old_scale: str, - new_scale: str, -) -> float: - """Rescale a sensitivity between parameter scales. - - :param sensitivity: - The sensitivity corresponding to the parameter value. - :param parameter_value: - The parameter vector element, on ``old_scale``. - :param old_scale: - The scale of the parameter value. 
- :param new_scale: - The parameter scale on which to rescale the sensitivity. - - :return: - The rescaled sensitivity. - """ - LOG_E_10 = np.log(10) - - if old_scale == new_scale: - return sensitivity - - unscaled_parameter_value = petab.parameters.unscale( - parameter=parameter_value, - scale_str=old_scale, - ) - - scale = { - (LIN, LOG): lambda s: s * unscaled_parameter_value, - (LOG, LIN): lambda s: s / unscaled_parameter_value, - (LIN, LOG10): lambda s: s * (unscaled_parameter_value * LOG_E_10), - (LOG10, LIN): lambda s: s / (unscaled_parameter_value * LOG_E_10), - } - - scale[(LOG, LOG10)] = lambda s: scale[(LIN, LOG10)](scale[(LOG, LIN)](s)) - scale[(LOG10, LOG)] = lambda s: scale[(LIN, LOG)](scale[(LOG10, LIN)](s)) - - if (old_scale, new_scale) not in scale: - raise NotImplementedError( - f"Old scale: {old_scale}. New scale: {new_scale}." - ) - - return scale[(old_scale, new_scale)](sensitivity) - - -def create_parameterized_edatas( - amici_model: AmiciModel, - petab_problem: petab.Problem, - problem_parameters: Dict[str, numbers.Number], - scaled_parameters: bool = False, - parameter_mapping: ParameterMapping = None, - simulation_conditions: Union[pd.DataFrame, Dict] = None, -) -> List[amici.ExpData]: - """Create list of :class:amici.ExpData objects with parameters filled in. - - :param amici_model: - AMICI Model assumed to be compatible with ``petab_problem``. - :param petab_problem: - PEtab problem to work on. - :param problem_parameters: - Run simulation with these parameters. If ``None``, PEtab - ``nominalValues`` will be used. To be provided as dict, mapping PEtab - problem parameters to SBML IDs. - :param scaled_parameters: - If ``True``, ``problem_parameters`` are assumed to be on the scale - provided in the PEtab parameter table and will be unscaled. - If ``False``, they are assumed to be in linear scale. - :param parameter_mapping: - Optional precomputed PEtab parameter mapping for efficiency, as - generated by :func:`create_parameter_mapping`. 
- :param simulation_conditions: - Result of :func:`petab.get_simulation_conditions`. Can be provided to - save time if this has been obtained before. - - :return: - List with one :class:`amici.amici.ExpData` per simulation condition, - with filled in timepoints, data and parameters. - """ - # number of amici simulations will be number of unique - # (preequilibrationConditionId, simulationConditionId) pairs. - # Can be optimized by checking for identical condition vectors. - if simulation_conditions is None: - simulation_conditions = ( - petab_problem.get_simulation_conditions_from_measurement_df() - ) - - # Get parameter mapping - if parameter_mapping is None: - parameter_mapping = create_parameter_mapping( - petab_problem=petab_problem, - simulation_conditions=simulation_conditions, - scaled_parameters=scaled_parameters, - amici_model=amici_model, - ) - - # Generate ExpData with all condition-specific information - edatas = create_edatas( - amici_model=amici_model, - petab_problem=petab_problem, - simulation_conditions=simulation_conditions, - ) - - # Fill parameters in ExpDatas (in-place) - fill_in_parameters( - edatas=edatas, - problem_parameters=problem_parameters, - scaled_parameters=scaled_parameters, - parameter_mapping=parameter_mapping, - amici_model=amici_model, - ) - - return edatas - - -def create_parameter_mapping( - petab_problem: petab.Problem, - simulation_conditions: Union[pd.DataFrame, List[Dict]], - scaled_parameters: bool, - amici_model: AmiciModel, - **parameter_mapping_kwargs, -) -> ParameterMapping: - """Generate AMICI specific parameter mapping. - - :param petab_problem: - PEtab problem - :param simulation_conditions: - Result of :func:`petab.get_simulation_conditions`. Can be provided to - save time if this has been obtained before. - :param scaled_parameters: - If ``True``, problem_parameters are assumed to be on the scale provided - in the PEtab parameter table and will be unscaled. If ``False``, they - are assumed to be in linear scale. 
- :param amici_model: - AMICI model. - :param parameter_mapping_kwargs: - Optional keyword arguments passed to - :func:`petab.get_optimization_to_simulation_parameter_mapping`. - To allow changing fixed PEtab problem parameters (``estimate=0``), - use ``fill_fixed_parameters=False``. - :return: - List of the parameter mappings. - """ - if simulation_conditions is None: - simulation_conditions = ( - petab_problem.get_simulation_conditions_from_measurement_df() - ) - if isinstance(simulation_conditions, list): - simulation_conditions = pd.DataFrame(data=simulation_conditions) - - # Because AMICI globalizes all local parameters during model import, - # we need to do that here as well to prevent parameter mapping errors - # (PEtab does currently not care about SBML LocalParameters) - if petab_problem.model.type_id == MODEL_TYPE_SBML: - if petab_problem.sbml_document: - converter_config = ( - libsbml.SBMLLocalParameterConverter().getDefaultProperties() - ) - petab_problem.sbml_document.convert(converter_config) - else: - logger.debug( - "No petab_problem.sbml_document is set. Cannot " - "convert SBML LocalParameters. If the model contains " - "LocalParameters, parameter mapping will fail." 
- ) - - default_parameter_mapping_kwargs = { - "warn_unmapped": False, - "scaled_parameters": scaled_parameters, - "allow_timepoint_specific_numeric_noise_parameters": not petab.lint.observable_table_has_nontrivial_noise_formula( - petab_problem.observable_df - ), - } - if parameter_mapping_kwargs is None: - parameter_mapping_kwargs = {} - - prelim_parameter_mapping = ( - petab.get_optimization_to_simulation_parameter_mapping( - condition_df=petab_problem.condition_df, - measurement_df=petab_problem.measurement_df, - parameter_df=petab_problem.parameter_df, - observable_df=petab_problem.observable_df, - mapping_df=petab_problem.mapping_df, - model=petab_problem.model, - simulation_conditions=simulation_conditions, - **dict( - default_parameter_mapping_kwargs, **parameter_mapping_kwargs - ), - ) - ) - - parameter_mapping = ParameterMapping() - for (_, condition), prelim_mapping_for_condition in zip( - simulation_conditions.iterrows(), prelim_parameter_mapping - ): - mapping_for_condition = create_parameter_mapping_for_condition( - prelim_mapping_for_condition, condition, petab_problem, amici_model - ) - parameter_mapping.append(mapping_for_condition) - - return parameter_mapping - - -def _get_initial_state_sbml( - petab_problem: petab.Problem, element_id: str -) -> Union[float, sp.Basic]: - element = petab_problem.sbml_model.getElementBySId(element_id) - type_code = element.getTypeCode() - initial_assignment = petab_problem.sbml_model.getInitialAssignmentBySymbol( - element_id - ) - if initial_assignment: - initial_assignment = sp.sympify( - libsbml.formulaToL3String(initial_assignment.getMath()), - locals=_clash, - ) - if type_code == libsbml.SBML_SPECIES: - value = ( - get_species_initial(element) - if initial_assignment is None - else initial_assignment - ) - elif type_code == libsbml.SBML_PARAMETER: - value = ( - element.getValue() - if initial_assignment is None - else initial_assignment - ) - elif type_code == libsbml.SBML_COMPARTMENT: - value = ( - 
element.getSize() - if initial_assignment is None - else initial_assignment - ) - else: - raise NotImplementedError( - f"Don't know what how to handle {element_id} in " - "condition table." - ) - return value - - -def _get_initial_state_pysb( - petab_problem: petab.Problem, element_id: str -) -> Union[float, sp.Symbol]: - species_idx = int(re.match(r"__s(\d+)$", element_id)[1]) - species_pattern = petab_problem.model.model.species[species_idx] - from pysb.pattern import match_complex_pattern - - value = next( - ( - initial.value - for initial in petab_problem.model.model.initials - if match_complex_pattern( - initial.pattern, species_pattern, exact=True - ) - ), - 0.0, - ) - if isinstance(value, pysb.Parameter): - if value.name in petab_problem.parameter_df.index: - value = value.name - else: - value = value.value - - return value - - -def _set_initial_state( - petab_problem, - condition_id, - element_id, - init_par_id, - par_map, - scale_map, - value, -): - value = petab.to_float_if_float(value) - if pd.isna(value): - if petab_problem.model.type_id == MODEL_TYPE_SBML: - value = _get_initial_state_sbml(petab_problem, element_id) - elif petab_problem.model.type_id == MODEL_TYPE_PYSB: - value = _get_initial_state_pysb(petab_problem, element_id) - - try: - value = float(value) - except (ValueError, TypeError): - if sp.nsimplify(value).is_Atom and ( - pysb is None or not isinstance(value, pysb.Component) - ): - # Get rid of multiplication with one - value = sp.nsimplify(value) - else: - raise NotImplementedError( - "Cannot handle non-trivial initial state " - f"expression for {element_id}: {value}" - ) - # this should be a parameter ID - value = str(value) - logger.debug( - f"The species {element_id} has no initial value " - f"defined for the condition {condition_id} in " - "the PEtab conditions table. The initial value is " - f"now set to {value}, which is the initial value " - "defined in the SBML model." 
- ) - par_map[init_par_id] = value - if isinstance(value, float): - # numeric initial state - scale_map[init_par_id] = petab.LIN - else: - # parametric initial state - scale_map[init_par_id] = petab_problem.parameter_df[ - PARAMETER_SCALE - ].get(value, petab.LIN) - - -def create_parameter_mapping_for_condition( - parameter_mapping_for_condition: petab.ParMappingDictQuadruple, - condition: Union[pd.Series, Dict], - petab_problem: petab.Problem, - amici_model: AmiciModel, -) -> ParameterMappingForCondition: - """Generate AMICI specific parameter mapping for condition. - - :param parameter_mapping_for_condition: - Preliminary parameter mapping for condition. - :param condition: - :class:`pandas.DataFrame` row with ``preequilibrationConditionId`` and - ``simulationConditionId``. - :param petab_problem: - Underlying PEtab problem. - :param amici_model: - AMICI model. - - :return: - The parameter and parameter scale mappings, for fixed - preequilibration, fixed simulation, and variable simulation - parameters, and then the respective scalings. - """ - ( - condition_map_preeq, - condition_map_sim, - condition_scale_map_preeq, - condition_scale_map_sim, - ) = parameter_mapping_for_condition - logger.debug(f"PEtab mapping: {parameter_mapping_for_condition}") - - if len(condition_map_preeq) != len(condition_scale_map_preeq) or len( - condition_map_sim - ) != len(condition_scale_map_sim): - raise AssertionError( - "Number of parameters and number of parameter " - "scales do not match." - ) - if len(condition_map_preeq) and len(condition_map_preeq) != len( - condition_map_sim - ): - logger.debug(f"Preequilibration parameter map: {condition_map_preeq}") - logger.debug(f"Simulation parameter map: {condition_map_sim}") - raise AssertionError( - "Number of parameters for preequilbration " - "and simulation do not match." 
- ) - - ########################################################################## - # initial states - # Initial states have been set during model import based on the SBML model. - # If initial states were overwritten in the PEtab condition table, they are - # applied here. - # During model generation, parameters for initial concentrations and - # respective initial assignments have been created for the - # relevant species, here we add these parameters to the parameter mapping. - # In absence of preequilibration this could also be handled via - # ExpData.x0, but in the case of preequilibration this would not allow for - # resetting initial states. - - if states_in_condition_table := get_states_in_condition_table( - petab_problem, condition - ): - # set indicator fixed parameter for preeq - # (we expect here, that this parameter was added during import and - # that it was not added by the user with a different meaning...) - if condition_map_preeq: - condition_map_preeq[PREEQ_INDICATOR_ID] = 1.0 - condition_scale_map_preeq[PREEQ_INDICATOR_ID] = LIN - - condition_map_sim[PREEQ_INDICATOR_ID] = 0.0 - condition_scale_map_sim[PREEQ_INDICATOR_ID] = LIN - for element_id, ( - value, - preeq_value, - ) in states_in_condition_table.items(): - # for preequilibration - init_par_id = f"initial_{element_id}_preeq" - if ( - condition_id := condition.get(PREEQUILIBRATION_CONDITION_ID) - ) is not None: - _set_initial_state( - petab_problem, - condition_id, - element_id, - init_par_id, - condition_map_preeq, - condition_scale_map_preeq, - preeq_value, - ) - else: - # need to set dummy value for preeq parameter anyways, as it - # is expected below (set to 0, not nan, because will be - # multiplied with indicator variable in initial assignment) - condition_map_sim[init_par_id] = 0.0 - condition_scale_map_sim[init_par_id] = LIN - - # for simulation - condition_id = condition[SIMULATION_CONDITION_ID] - init_par_id = f"initial_{element_id}_sim" - _set_initial_state( - petab_problem, - 
condition_id, - element_id, - init_par_id, - condition_map_sim, - condition_scale_map_sim, - value, - ) - - ########################################################################## - # separate fixed and variable AMICI parameters, because we may have - # different fixed parameters for preeq and sim condition, but we cannot - # have different variable parameters. without splitting, - # merge_preeq_and_sim_pars_condition below may fail. - # TODO: This can be done already in parameter mapping creation. - variable_par_ids = amici_model.getParameterIds() - fixed_par_ids = amici_model.getFixedParameterIds() - - condition_map_preeq_var, condition_map_preeq_fix = _subset_dict( - condition_map_preeq, variable_par_ids, fixed_par_ids - ) - - ( - condition_scale_map_preeq_var, - condition_scale_map_preeq_fix, - ) = _subset_dict( - condition_scale_map_preeq, variable_par_ids, fixed_par_ids - ) - - condition_map_sim_var, condition_map_sim_fix = _subset_dict( - condition_map_sim, variable_par_ids, fixed_par_ids - ) - - condition_scale_map_sim_var, condition_scale_map_sim_fix = _subset_dict( - condition_scale_map_sim, variable_par_ids, fixed_par_ids - ) - - logger.debug( - "Fixed parameters preequilibration: " f"{condition_map_preeq_fix}" - ) - logger.debug("Fixed parameters simulation: " f"{condition_map_sim_fix}") - logger.debug( - "Variable parameters preequilibration: " f"{condition_map_preeq_var}" - ) - logger.debug("Variable parameters simulation: " f"{condition_map_sim_var}") - - petab.merge_preeq_and_sim_pars_condition( - condition_map_preeq_var, - condition_map_sim_var, - condition_scale_map_preeq_var, - condition_scale_map_sim_var, - condition, - ) - logger.debug(f"Merged: {condition_map_sim_var}") - - parameter_mapping_for_condition = ParameterMappingForCondition( - map_preeq_fix=condition_map_preeq_fix, - map_sim_fix=condition_map_sim_fix, - map_sim_var=condition_map_sim_var, - scale_map_preeq_fix=condition_scale_map_preeq_fix, - 
scale_map_sim_fix=condition_scale_map_sim_fix, - scale_map_sim_var=condition_scale_map_sim_var, - ) - - return parameter_mapping_for_condition - - -def create_edatas( - amici_model: AmiciModel, - petab_problem: petab.Problem, - simulation_conditions: Union[pd.DataFrame, Dict] = None, -) -> List[amici.ExpData]: - """Create list of :class:`amici.amici.ExpData` objects for PEtab problem. - - :param amici_model: - AMICI model. - :param petab_problem: - Underlying PEtab problem. - :param simulation_conditions: - Result of :func:`petab.get_simulation_conditions`. Can be provided to - save time if this has be obtained before. - - :return: - List with one :class:`amici.amici.ExpData` per simulation condition, - with filled in timepoints and data. - """ - if simulation_conditions is None: - simulation_conditions = ( - petab_problem.get_simulation_conditions_from_measurement_df() - ) - - observable_ids = amici_model.getObservableIds() - - measurement_groupvar = [SIMULATION_CONDITION_ID] - if PREEQUILIBRATION_CONDITION_ID in simulation_conditions: - measurement_groupvar.append(petab.PREEQUILIBRATION_CONDITION_ID) - measurement_dfs = dict( - list(petab_problem.measurement_df.groupby(measurement_groupvar)) - ) - - edatas = [] - for _, condition in simulation_conditions.iterrows(): - # Create amici.ExpData for each simulation - if PREEQUILIBRATION_CONDITION_ID in condition: - measurement_index = ( - condition.get(SIMULATION_CONDITION_ID), - condition.get(PREEQUILIBRATION_CONDITION_ID), - ) - else: - measurement_index = (condition.get(SIMULATION_CONDITION_ID),) - edata = create_edata_for_condition( - condition=condition, - amici_model=amici_model, - measurement_df=measurement_dfs[measurement_index], - petab_problem=petab_problem, - observable_ids=observable_ids, - ) - edatas.append(edata) - - return edatas - - -def create_edata_for_condition( - condition: Union[Dict, pd.Series], - measurement_df: pd.DataFrame, - amici_model: AmiciModel, - petab_problem: petab.Problem, - 
observable_ids: List[str], -) -> amici.ExpData: - """Get :class:`amici.amici.ExpData` for the given PEtab condition. - - Sets timepoints, observed data and sigmas. - - :param condition: - :class:`pandas.DataFrame` row with ``preequilibrationConditionId`` and - ``simulationConditionId``. - :param measurement_df: - :class:`pandas.DataFrame` with measurements for the given condition. - :param amici_model: - AMICI model - :param petab_problem: - Underlying PEtab problem - :param observable_ids: - List of observable IDs - - :return: - ExpData instance. - """ - if amici_model.nytrue != len(observable_ids): - raise AssertionError( - "Number of AMICI model observables does not " - "match number of PEtab observables." - ) - - # create an ExpData object - edata = amici.ExpData(amici_model) - edata.id = condition[SIMULATION_CONDITION_ID] - if condition.get(PREEQUILIBRATION_CONDITION_ID): - edata.id += "+" + condition.get(PREEQUILIBRATION_CONDITION_ID) - ########################################################################## - # enable initial parameters reinitialization - - states_in_condition_table = get_states_in_condition_table( - petab_problem, condition=condition - ) - if ( - condition.get(PREEQUILIBRATION_CONDITION_ID) - and states_in_condition_table - ): - state_ids = amici_model.getStateIds() - state_idx_reinitalization = [ - state_ids.index(s) - for s, (v, v_preeq) in states_in_condition_table.items() - if not np.isnan(v) - ] - edata.reinitialization_state_idxs_sim = state_idx_reinitalization - logger.debug( - "Enabling state reinitialization for condition " - f"{condition.get(PREEQUILIBRATION_CONDITION_ID, '')} - " - f"{condition.get(SIMULATION_CONDITION_ID)} " - f"{states_in_condition_table}" - ) - - ########################################################################## - # timepoints - - # find replicate numbers of time points - timepoints_w_reps = _get_timepoints_with_replicates( - df_for_condition=measurement_df - ) - 
edata.setTimepoints(timepoints_w_reps) - - ########################################################################## - # measurements and sigmas - y, sigma_y = _get_measurements_and_sigmas( - df_for_condition=measurement_df, - timepoints_w_reps=timepoints_w_reps, - observable_ids=observable_ids, - ) - edata.setObservedData(y.flatten()) - edata.setObservedDataStdDev(sigma_y.flatten()) - - return edata - - -def _subset_dict( - full: Dict[Any, Any], *args: Collection[Any] -) -> Iterator[Dict[Any, Any]]: - """Get subset of dictionary based on provided keys - - :param full: - Dictionary to subset - :param args: - Collections of keys to be contained in the different subsets - - :return: - subsetted dictionary - """ - for keys in args: - yield {key: val for (key, val) in full.items() if key in keys} - - -def _get_timepoints_with_replicates( - df_for_condition: pd.DataFrame, -) -> List[numbers.Number]: - """ - Get list of timepoints including replicate measurements - - :param df_for_condition: - PEtab measurement table subset for a single condition. - - :return: - Sorted list of timepoints, including multiple timepoints accounting - for replicate measurements. 
- """ - # create sorted list of all timepoints for which measurements exist - timepoints = sorted(df_for_condition[TIME].unique().astype(float)) - - # find replicate numbers of time points - timepoints_w_reps = [] - for time in timepoints: - # subselect for time - df_for_time = df_for_condition[ - df_for_condition.time.astype(float) == time - ] - # rep number is maximum over rep numbers for observables - n_reps = max(df_for_time.groupby([OBSERVABLE_ID, TIME]).size()) - # append time point n_rep times - timepoints_w_reps.extend([time] * n_reps) - - return timepoints_w_reps - - -def _get_measurements_and_sigmas( - df_for_condition: pd.DataFrame, - timepoints_w_reps: Sequence[numbers.Number], - observable_ids: Sequence[str], -) -> Tuple[np.array, np.array]: - """ - Get measurements and sigmas - - Generate arrays with measurements and sigmas in AMICI format from a - PEtab measurement table subset for a single condition. - - :param df_for_condition: - Subset of PEtab measurement table for one condition - - :param timepoints_w_reps: - Timepoints for which there exist measurements, including replicates - - :param observable_ids: - List of observable IDs for mapping IDs to indices. 
- - :return: - arrays for measurement and sigmas - """ - # prepare measurement matrix - y = np.full( - shape=(len(timepoints_w_reps), len(observable_ids)), fill_value=np.nan - ) - # prepare sigma matrix - sigma_y = y.copy() - - timepoints = sorted(df_for_condition[TIME].unique().astype(float)) - - for time in timepoints: - # subselect for time - df_for_time = df_for_condition[df_for_condition[TIME] == time] - time_ix_0 = timepoints_w_reps.index(time) - - # remember used time indices for each observable - time_ix_for_obs_ix = {} - - # iterate over measurements - for _, measurement in df_for_time.iterrows(): - # extract observable index - observable_ix = observable_ids.index(measurement[OBSERVABLE_ID]) - - # update time index for observable - if observable_ix in time_ix_for_obs_ix: - time_ix_for_obs_ix[observable_ix] += 1 - else: - time_ix_for_obs_ix[observable_ix] = time_ix_0 - - # fill observable and possibly noise parameter - y[time_ix_for_obs_ix[observable_ix], observable_ix] = measurement[ - MEASUREMENT - ] - if isinstance( - measurement.get(NOISE_PARAMETERS, None), numbers.Number - ): - sigma_y[ - time_ix_for_obs_ix[observable_ix], observable_ix - ] = measurement[NOISE_PARAMETERS] - return y, sigma_y - - -def rdatas_to_measurement_df( - rdatas: Sequence[amici.ReturnData], - model: AmiciModel, - measurement_df: pd.DataFrame, -) -> pd.DataFrame: - """ - Create a measurement dataframe in the PEtab format from the passed - ``rdatas`` and own information. - - :param rdatas: - A sequence of rdatas with the ordering of - :func:`petab.get_simulation_conditions`. - - :param model: - AMICI model used to generate ``rdatas``. - - :param measurement_df: - PEtab measurement table used to generate ``rdatas``. - - :return: - A dataframe built from the rdatas in the format of ``measurement_df``. 
- """ - simulation_conditions = petab.get_simulation_conditions(measurement_df) - - observable_ids = model.getObservableIds() - rows = [] - # iterate over conditions - for (_, condition), rdata in zip(simulation_conditions.iterrows(), rdatas): - # current simulation matrix - y = rdata.y - # time array used in rdata - t = list(rdata.ts) - - # extract rows for condition - cur_measurement_df = petab.get_rows_for_condition( - measurement_df, condition - ) - - # iterate over entries for the given condition - # note: this way we only generate a dataframe entry for every - # row that existed in the original dataframe. if we want to - # e.g. have also timepoints non-existent in the original file, - # we need to instead iterate over the rdata['y'] entries - for _, row in cur_measurement_df.iterrows(): - # copy row - row_sim = copy.deepcopy(row) - - # extract simulated measurement value - timepoint_idx = t.index(row[TIME]) - observable_idx = observable_ids.index(row[OBSERVABLE_ID]) - measurement_sim = y[timepoint_idx, observable_idx] - - # change measurement entry - row_sim[MEASUREMENT] = measurement_sim - - rows.append(row_sim) - - return pd.DataFrame(rows) - - -def rdatas_to_simulation_df( - rdatas: Sequence[amici.ReturnData], - model: AmiciModel, - measurement_df: pd.DataFrame, -) -> pd.DataFrame: - """Create a PEtab simulation dataframe from - :class:`amici.amici.ReturnData` s. - - See :func:`rdatas_to_measurement_df` for details, only that model outputs - will appear in column ``simulation`` instead of ``measurement``.""" - - df = rdatas_to_measurement_df( - rdatas=rdatas, model=model, measurement_df=measurement_df - ) - - return df.rename(columns={MEASUREMENT: SIMULATION}) - - -def _default_scaled_parameters( - petab_problem: petab.Problem, - problem_parameters: Optional[Dict[str, float]] = None, - scaled_parameters: bool = False, -) -> Optional[Dict[str, float]]: - """ - Helper method to handle an unscaled or unspecified parameter vector. 
- - The parameter vector defaults to the nominal values in the PEtab - parameter table. - - Unscaled parameter values are scaled. - - :param petab_problem: - The PEtab problem. - :param problem_parameters: - Keys are PEtab parameter IDs, values are parameter values on the scale - defined in the PEtab parameter table. Defaults to the nominal values in - the PEtab parameter table. - :param scaled_parameters: - Whether `problem_parameters` are on the scale defined in the PEtab - parameter table. +from .petab.conditions import fill_in_parameters # noqa: F401 +from .petab.parameter_mapping import create_parameter_mapping # noqa: F401 +from .petab.simulations import ( # noqa: F401 + EDATAS, + FIM, + LLH, + RDATAS, + RES, + S2LLH, + SLLH, + SRES, + aggregate_sllh, + create_edatas, + rdatas_to_measurement_df, + rdatas_to_simulation_df, + rescale_sensitivity, + simulate_petab, +) - :return: - The scaled parameter vector. - """ - if problem_parameters is None: - problem_parameters = dict( - zip( - petab_problem.x_ids, - petab_problem.x_nominal_scaled, - ) - ) - elif not scaled_parameters: - problem_parameters = petab_problem.scale_parameters(problem_parameters) - return problem_parameters +__all__ = [ + "EDATAS", + "FIM", + "LLH", + "RDATAS", + "RES", + "S2LLH", + "SLLH", + "SRES", + "aggregate_sllh", + "create_edatas", + "fill_in_parameters", + "create_parameter_mapping", + "rdatas_to_measurement_df", + "rdatas_to_simulation_df", + "rescale_sensitivity", + "simulate_petab", +] diff --git a/python/sdist/amici/petab_simulate.py b/python/sdist/amici/petab_simulate.py index 32c1ef8955..2dd25a8e4a 100644 --- a/python/sdist/amici/petab_simulate.py +++ b/python/sdist/amici/petab_simulate.py @@ -1,113 +1,20 @@ """ -PEtab Simulate --------------- -Functionality related to the use of AMICI for simulation with PEtab's -Simulator class. +Simulate a PEtab problem -Use cases: - -- generate data for use with PEtab's plotting methods -- generate synthetic data +.. 
deprecated:: 0.21.0 + Use :mod:`amici.petab.simulator` instead. """ +# THIS FILE IS TO BE REMOVED - DON'T ADD ANYTHING HERE! -import inspect -import sys -from typing import Callable +import warnings -import pandas as pd -import petab -from amici import AmiciModel, SensitivityMethod_none -from amici.petab_import import import_petab_problem -from amici.petab_objective import ( - RDATAS, - rdatas_to_measurement_df, - simulate_petab, +warnings.warn( + f"Importing {__name__} is deprecated. Use `amici.petab.simulator` instead.", + DeprecationWarning, ) -AMICI_MODEL = "amici_model" -AMICI_SOLVER = "solver" -MODEL_NAME = "model_name" -MODEL_OUTPUT_DIR = "model_output_dir" - -PETAB_PROBLEM = "petab_problem" - - -class PetabSimulator(petab.simulate.Simulator): - """Implementation of the PEtab `Simulator` class that uses AMICI.""" - - def __init__(self, *args, amici_model: AmiciModel = None, **kwargs): - super().__init__(*args, **kwargs) - self.amici_model = amici_model - - def simulate_without_noise(self, **kwargs) -> pd.DataFrame: - """ - See :py:func:`petab.simulate.Simulator.simulate()` docstring. - - Additional keyword arguments can be supplied to specify arguments for - the AMICI PEtab import, simulate, and export methods. See the - docstrings for the respective methods for argument options: - - :py:func:`amici.petab_import.import_petab_problem`, and - - :py:func:`amici.petab_objective.simulate_petab`. - - Note that some arguments are expected to have already been specified - in the Simulator constructor (including the PEtab problem). - """ - if AMICI_MODEL in {*kwargs, *dir(self)} and ( - any( - k in kwargs - for k in inspect.signature(import_petab_problem).parameters - ) - ): - print( - "Arguments related to the PEtab import are unused if " - f"`{AMICI_MODEL}` is specified, or the " - "`PetabSimulator.simulate()` method was previously called." 
- ) - - kwargs[PETAB_PROBLEM] = self.petab_problem - - # The AMICI model instance for the PEtab problem is saved in the state, - # such that it need not be supplied with each request for simulated - # data. Any user-supplied AMICI model will overwrite the model saved - # in the state. - if AMICI_MODEL not in kwargs: - if self.amici_model is None: - if MODEL_NAME not in kwargs: - kwargs[MODEL_NAME] = AMICI_MODEL - # If the model name is the name of a module that is already - # cached, it can cause issues during import. - while kwargs[MODEL_NAME] in sys.modules: - kwargs[MODEL_NAME] += str(self.rng.integers(10)) - if MODEL_OUTPUT_DIR not in kwargs: - kwargs[MODEL_OUTPUT_DIR] = self.working_dir - self.amici_model = _subset_call(import_petab_problem, kwargs) - kwargs[AMICI_MODEL] = self.amici_model - self.amici_model = kwargs[AMICI_MODEL] - - if AMICI_SOLVER not in kwargs: - kwargs[AMICI_SOLVER] = self.amici_model.getSolver() - kwargs[AMICI_SOLVER].setSensitivityMethod(SensitivityMethod_none) - - result = _subset_call(simulate_petab, kwargs) - return rdatas_to_measurement_df( - result[RDATAS], self.amici_model, self.petab_problem.measurement_df - ) - - -def _subset_call(method: Callable, kwargs: dict): - """ - Helper function to call a method with the intersection of arguments in the - method signature and the supplied arguments. +from .petab.simulator import PetabSimulator # noqa: F401 - :param method: - The method to be called. - :param kwargs: - The argument superset as a dictionary, similar to ``**kwargs`` in - method signatures. - :return: - The output of ``method``, called with the applicable arguments in - ``kwargs``. 
- """ - method_args = inspect.signature(method).parameters - subset_kwargs = {k: v for k, v in kwargs.items() if k in method_args} - return method(**subset_kwargs) +__all__ = [ + "PetabSimulator", +] diff --git a/python/sdist/amici/petab_util.py b/python/sdist/amici/petab_util.py index 9108b108bc..ff202bf2e0 100644 --- a/python/sdist/amici/petab_util.py +++ b/python/sdist/amici/petab_util.py @@ -1,107 +1,23 @@ -"""Various helper functions for working with PEtab problems.""" -import re -from typing import Dict, Tuple, Union +""" +Various helper functions for working with PEtab problems. -import libsbml -import pandas as pd -import petab -from petab.C import PREEQUILIBRATION_CONDITION_ID, SIMULATION_CONDITION_ID -from petab.mapping import resolve_mapping -from petab.models import MODEL_TYPE_PYSB, MODEL_TYPE_SBML +.. deprecated:: 0.21.0 + Use :mod:`amici.petab.util` instead. +""" -# ID of model parameter that is to be added to SBML model to indicate -# preequilibration -PREEQ_INDICATOR_ID = "preequilibration_indicator" +# THIS FILE IS TO BE REMOVED - DON'T ADD ANYTHING HERE! +import warnings -def get_states_in_condition_table( - petab_problem: petab.Problem, - condition: Union[Dict, pd.Series] = None, - return_patterns: bool = False, -) -> Dict[str, Tuple[Union[float, str, None], Union[float, str, None]]]: - """Get states and their initial condition as specified in the condition table. +from .petab import PREEQ_INDICATOR_ID # noqa: F401 +from .petab.util import get_states_in_condition_table # noqa: F401 - Returns: Dictionary: ``stateId -> (initial condition simulation, initial condition preequilibration)`` - """ - if petab_problem.model.type_id not in (MODEL_TYPE_SBML, MODEL_TYPE_PYSB): - raise NotImplementedError() +warnings.warn( + f"Importing {__name__} is deprecated. 
Use `amici.petab.util` instead.", + DeprecationWarning, +) - species_check_funs = { - MODEL_TYPE_SBML: lambda x: _element_is_sbml_state( - petab_problem.sbml_model, x - ), - MODEL_TYPE_PYSB: lambda x: _element_is_pysb_pattern( - petab_problem.model.model, x - ), - } - states = { - resolve_mapping(petab_problem.mapping_df, col): (None, None) - if condition is None - else ( - petab_problem.condition_df.loc[ - condition[SIMULATION_CONDITION_ID], col - ], - petab_problem.condition_df.loc[ - condition[PREEQUILIBRATION_CONDITION_ID], col - ] - if PREEQUILIBRATION_CONDITION_ID in condition - else None, - ) - for col in petab_problem.condition_df.columns - if species_check_funs[petab_problem.model.type_id]( - resolve_mapping(petab_problem.mapping_df, col) - ) - } - - if petab_problem.model.type_id == MODEL_TYPE_PYSB: - if return_patterns: - return states - import pysb.pattern - - if not petab_problem.model.model.species: - import pysb.bng - - pysb.bng.generate_equations(petab_problem.model.model) - - try: - spm = pysb.pattern.SpeciesPatternMatcher( - model=petab_problem.model.model - ) - except NotImplementedError as e: - raise NotImplementedError( - "Requires https://github.com/pysb/pysb/pull/570. 
" - "To use this functionality, update pysb via " - "`pip install git+https://github.com/FFroehlich/pysb@fix_pattern_matching`" - ) - - # expose model components as variables so we can evaluate patterns - for c in petab_problem.model.model.components: - globals()[c.name] = c - - states = { - f"__s{ix}": value - for pattern, value in states.items() - for ix in spm.match(eval(pattern), index=True, exact=True) - } - return states - - -def _element_is_pysb_pattern(model: "pysb.Model", element: str) -> bool: - """Check if element is a pysb pattern""" - if match := re.match(r"[a-zA-Z_][\w_]*\(", element): - return match[0][:-1] in [m.name for m in model.monomers] - return False - - -def _element_is_sbml_state(sbml_model: libsbml.Model, sbml_id: str) -> bool: - """Does the element with ID `sbml_id` correspond to a state variable?""" - if sbml_model.getCompartment(sbml_id) is not None: - return True - if sbml_model.getSpecies(sbml_id) is not None: - return True - if ( - rule := sbml_model.getRuleByVariable(sbml_id) - ) is not None and rule.getTypeCode() == libsbml.SBML_RATE_RULE: - return True - - return False +__all__ = [ + "get_states_in_condition_table", + "PREEQ_INDICATOR_ID", +] diff --git a/python/sdist/amici/plotting.py b/python/sdist/amici/plotting.py index bd1f3a8ba1..25607638d7 100644 --- a/python/sdist/amici/plotting.py +++ b/python/sdist/amici/plotting.py @@ -3,9 +3,11 @@ -------- Plotting related functions """ -from typing import Iterable, Optional, Sequence, Union +from typing import Optional, Union +from collections.abc import Iterable, Sequence import matplotlib.pyplot as plt +import numpy as np import pandas as pd import seaborn as sns from matplotlib.axes import Axes @@ -16,42 +18,54 @@ def plot_state_trajectories( rdata: ReturnDataView, - state_indices: Optional[Iterable[int]] = None, + state_indices: Optional[Sequence[int]] = None, ax: Optional[Axes] = None, model: Model = None, prefer_names: bool = True, + marker=None, ) -> None: """ - Plot state 
trajectories + Plot state trajectories. :param rdata: AMICI simulation results as returned by - :func:`amici.amici.runAmiciSimulation` - + :func:`amici.amici.runAmiciSimulation`. :param state_indices: - Indices of states for which trajectories are to be plotted - + Indices of state variables for which trajectories are to be plotted. :param ax: - matplotlib Axes instance to plot into - + :class:`matplotlib.pyplot.Axes` instance to plot into. :param model: - amici model instance - + The model *rdata* was generated from. :param prefer_names: Whether state names should be preferred over IDs, if available. + :param marker: + Point marker for plotting (see + `matplotlib documentation `_). """ if not ax: fig, ax = plt.subplots() if not state_indices: state_indices = range(rdata["x"].shape[1]) - for ix in state_indices: - if model is None: - label = f"$x_{{{ix}}}$" - elif prefer_names and model.getStateNames()[ix]: - label = model.getStateNames()[ix] - else: - label = model.getStateIds()[ix] - ax.plot(rdata["t"], rdata["x"][:, ix], label=label) + + if marker is None: + # Show marker if only one time point is available, + # otherwise nothing will be shown + marker = "o" if len(rdata.t) == 1 else None + + if model is None and rdata.ptr.state_ids is None: + labels = [f"$x_{{{ix}}}$" for ix in state_indices] + elif model is not None and prefer_names: + labels = np.asarray(model.getStateNames())[list(state_indices)] + labels = [ + l if l else model.getStateIds()[ix] for ix, l in enumerate(labels) + ] + elif model is not None: + labels = np.asarray(model.getStateIds())[list(state_indices)] + else: + labels = np.asarray(rdata.ptr.state_ids)[list(state_indices)] + + for ix, label in zip(state_indices, labels): + ax.plot(rdata["t"], rdata["x"][:, ix], marker=marker, label=label) ax.set_xlabel("$t$") ax.set_ylabel("$x(t)$") ax.legend() @@ -64,38 +78,54 @@ def plot_observable_trajectories( ax: Optional[Axes] = None, model: Model = None, prefer_names: bool = True, + marker=None, ) -> 
None: """ - Plot observable trajectories + Plot observable trajectories. :param rdata: AMICI simulation results as returned by - :func:`amici.amici.runAmiciSimulation` - + :func:`amici.amici.runAmiciSimulation`. :param observable_indices: - Indices of observables for which trajectories are to be plotted - + Indices of observables for which trajectories are to be plotted. :param ax: - matplotlib Axes instance to plot into - + :class:`matplotlib.pyplot.Axes` instance to plot into. :param model: - amici model instance - + The model *rdata* was generated from. :param prefer_names: - Whether observables names should be preferred over IDs, if available. + Whether observable names should be preferred over IDs, if available. + :param marker: + Point marker for plotting (see + `matplotlib documentation `_). + """ if not ax: fig, ax = plt.subplots() if not observable_indices: observable_indices = range(rdata["y"].shape[1]) - for iy in observable_indices: - if model is None: - label = f"$y_{{{iy}}}$" - elif prefer_names and model.getObservableNames()[iy]: - label = model.getObservableNames()[iy] - else: - label = model.getObservableIds()[iy] - ax.plot(rdata["t"], rdata["y"][:, iy], label=label) + + if marker is None: + # Show marker if only one time point is available, + # otherwise nothing will be shown + marker = "o" if len(rdata.t) == 1 else None + + if model is None and rdata.ptr.observable_ids is None: + labels = [f"$y_{{{iy}}}$" for iy in observable_indices] + elif model is not None and prefer_names: + labels = np.asarray(model.getObservableNames())[ + list(observable_indices) + ] + labels = [ + l if l else model.getObservableIds()[ix] + for ix, l in enumerate(labels) + ] + elif model is not None: + labels = np.asarray(model.getObservableIds())[list(observable_indices)] + else: + labels = np.asarray(rdata.ptr.observable_ids)[list(observable_indices)] + + for iy, label in zip(observable_indices, labels): + ax.plot(rdata["t"], rdata["y"][:, iy], marker=marker, 
label=label) ax.set_xlabel("$t$") ax.set_ylabel("$y(t)$") ax.legend() @@ -106,8 +136,8 @@ def plot_jacobian(rdata: ReturnDataView): """Plot Jacobian as heatmap.""" df = pd.DataFrame( data=rdata.J, - index=rdata._swigptr.state_ids_solver, - columns=rdata._swigptr.state_ids_solver, + index=rdata.ptr.state_ids_solver, + columns=rdata.ptr.state_ids_solver, ) sns.heatmap(df, center=0.0) plt.title("Jacobian") @@ -124,10 +154,10 @@ def plot_expressions( """Plot the given expressions evaluated on the given simulation outputs. :param exprs: - A symbolic expression, e.g. a sympy expression or a string that can be sympified. - Can include state variable, expression, and observable IDs, depending on whether - the respective data is available in the simulation results. - Parameters are not yet supported. + A symbolic expression, e.g., a sympy expression or a string that can be + sympified. It Can include state variable, expression, and + observable IDs, depending on whether the respective data is available + in the simulation results. Parameters are not yet supported. :param rdata: The simulation results. 
""" diff --git a/python/sdist/amici/pysb_import.py b/python/sdist/amici/pysb_import.py index aa1dc7cd9b..4f843033f1 100644 --- a/python/sdist/amici/pysb_import.py +++ b/python/sdist/amici/pysb_import.py @@ -13,14 +13,10 @@ from typing import ( Any, Callable, - Dict, - Iterable, - List, Optional, - Set, - Tuple, Union, ) +from collections.abc import Iterable import numpy as np import pysb @@ -49,8 +45,8 @@ ) from .logging import get_logger, log_execution_time, set_log_level -CL_Prototype = Dict[str, Dict[str, Any]] -ConservationLaw = Dict[str, Union[Dict, str, sp.Basic]] +CL_Prototype = dict[str, dict[str, Any]] +ConservationLaw = dict[str, Union[dict, str, sp.Basic]] logger = get_logger(__name__, logging.ERROR) @@ -58,10 +54,10 @@ def pysb2amici( model: pysb.Model, output_dir: Optional[Union[str, Path]] = None, - observables: List[str] = None, - constant_parameters: List[str] = None, - sigmas: Dict[str, str] = None, - noise_distributions: Optional[Dict[str, Union[str, Callable]]] = None, + observables: list[str] = None, + constant_parameters: list[str] = None, + sigmas: dict[str, str] = None, + noise_distributions: Optional[dict[str, Union[str, Callable]]] = None, verbose: Union[int, bool] = False, assume_pow_positivity: bool = False, compiler: str = None, @@ -191,10 +187,10 @@ def pysb2amici( @log_execution_time("creating ODE model", logger) def ode_model_from_pysb_importer( model: pysb.Model, - constant_parameters: List[str] = None, - observables: List[str] = None, - sigmas: Dict[str, str] = None, - noise_distributions: Optional[Dict[str, Union[str, Callable]]] = None, + constant_parameters: list[str] = None, + observables: list[str] = None, + sigmas: dict[str, str] = None, + noise_distributions: Optional[dict[str, Union[str, Callable]]] = None, compute_conservation_laws: bool = True, simplify: Callable = sp.powsimp, # Do not enable by default without testing. @@ -285,7 +281,7 @@ def ode_model_from_pysb_importer( @log_execution_time("processing PySB stoich. 
matrix", logger) def _process_stoichiometric_matrix( - pysb_model: pysb.Model, ode_model: DEModel, constant_parameters: List[str] + pysb_model: pysb.Model, ode_model: DEModel, constant_parameters: list[str] ) -> None: """ Exploits the PySB stoichiometric matrix to generate xdot derivatives @@ -410,12 +406,12 @@ def _process_pysb_species(pysb_model: pysb.Model, ode_model: DEModel) -> None: sp.Symbol(f"__s{ix}"), f"{specie}", init, xdot[ix] ) ) - logger.debug(f"Finished Processing PySB species ") + logger.debug("Finished Processing PySB species ") @log_execution_time("processing PySB parameters", logger) def _process_pysb_parameters( - pysb_model: pysb.Model, ode_model: DEModel, constant_parameters: List[str] + pysb_model: pysb.Model, ode_model: DEModel, constant_parameters: list[str] ) -> None: """ Converts pysb parameters into Parameters or Constants and adds them to @@ -443,9 +439,9 @@ def _process_pysb_parameters( def _process_pysb_expressions( pysb_model: pysb.Model, ode_model: DEModel, - observables: List[str], - sigmas: Dict[str, str], - noise_distributions: Optional[Dict[str, Union[str, Callable]]] = None, + observables: list[str], + sigmas: dict[str, str], + noise_distributions: Optional[dict[str, Union[str, Callable]]] = None, ) -> None: r""" Converts pysb expressions/observables into Observables (with @@ -508,9 +504,9 @@ def _add_expression( expr: sp.Basic, pysb_model: pysb.Model, ode_model: DEModel, - observables: List[str], - sigmas: Dict[str, str], - noise_distributions: Optional[Dict[str, Union[str, Callable]]] = None, + observables: list[str], + sigmas: dict[str, str], + noise_distributions: Optional[dict[str, Union[str, Callable]]] = None, ): """ Adds expressions to the ODE model given and adds observables/sigmas if @@ -579,8 +575,8 @@ def _add_expression( def _get_sigma_name_and_value( - pysb_model: pysb.Model, obs_name: str, sigmas: Dict[str, str] -) -> Tuple[str, sp.Basic]: + pysb_model: pysb.Model, obs_name: str, sigmas: dict[str, str] +) -> 
tuple[str, sp.Basic]: """ Tries to extract standard deviation symbolic identifier and formula for a given observable name from the pysb model and if no specification is @@ -623,9 +619,9 @@ def _get_sigma_name_and_value( def _process_pysb_observables( pysb_model: pysb.Model, ode_model: DEModel, - observables: List[str], - sigmas: Dict[str, str], - noise_distributions: Optional[Dict[str, Union[str, Callable]]] = None, + observables: list[str], + sigmas: dict[str, str], + noise_distributions: Optional[dict[str, Union[str, Callable]]] = None, ) -> None: """ Converts :class:`pysb.core.Observable` into @@ -704,7 +700,7 @@ def _process_pysb_conservation_laws( def _compute_monomers_with_fixed_initial_conditions( pysb_model: pysb.Model, -) -> Set[str]: +) -> set[str]: """ Computes the set of monomers in a model with species that have fixed initial conditions @@ -988,7 +984,7 @@ def _cl_has_cycle(monomer: str, cl_prototypes: CL_Prototype) -> bool: def _is_in_cycle( - monomer: str, cl_prototypes: CL_Prototype, visited: List[str], root: str + monomer: str, cl_prototypes: CL_Prototype, visited: list[str], root: str ) -> bool: """ Recursively checks for cycles in conservation law dependencies via @@ -1131,7 +1127,7 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None: del prototype["appearance_counts"][prototype["local_index"]] -def _get_target_indices(cl_prototypes: CL_Prototype) -> List[List[int]]: +def _get_target_indices(cl_prototypes: CL_Prototype) -> list[list[int]]: """ Computes the list target indices for the current conservation law prototype @@ -1147,7 +1143,7 @@ def _get_target_indices(cl_prototypes: CL_Prototype) -> List[List[int]]: def _construct_conservation_from_prototypes( cl_prototypes: CL_Prototype, pysb_model: pysb.Model -) -> List[ConservationLaw]: +) -> list[ConservationLaw]: """ Computes the algebraic expression for the total amount of a given monomer @@ -1183,7 +1179,7 @@ def _construct_conservation_from_prototypes( def 
_add_conservation_for_constant_species( - ode_model: DEModel, conservation_laws: List[ConservationLaw] + ode_model: DEModel, conservation_laws: list[ConservationLaw] ) -> None: """ Computes the algebraic expression for the total amount of a given @@ -1209,7 +1205,7 @@ def _add_conservation_for_constant_species( def _flatten_conservation_laws( - conservation_laws: List[ConservationLaw], + conservation_laws: list[ConservationLaw], ) -> None: """ Flatten the conservation laws such that the state_expr not longer @@ -1233,7 +1229,7 @@ def _flatten_conservation_laws( def _apply_conseration_law_sub( - cl: ConservationLaw, sub: Tuple[sp.Symbol, ConservationLaw] + cl: ConservationLaw, sub: tuple[sp.Symbol, ConservationLaw] ) -> bool: """ Applies a substitution to a conservation law by replacing the @@ -1288,8 +1284,8 @@ def _state_in_cl_formula(state: sp.Symbol, cl: ConservationLaw) -> bool: def _get_conservation_law_subs( - conservation_laws: List[ConservationLaw], -) -> List[Tuple[sp.Symbol, Dict[sp.Symbol, sp.Expr]]]: + conservation_laws: list[ConservationLaw], +) -> list[tuple[sp.Symbol, dict[sp.Symbol, sp.Expr]]]: """ Computes a list of (state, coeffs) tuples for conservation laws that still appear in other conservation laws @@ -1353,8 +1349,8 @@ def has_fixed_parameter_ic( def extract_monomers( - complex_patterns: Union[pysb.ComplexPattern, List[pysb.ComplexPattern]] -) -> List[str]: + complex_patterns: Union[pysb.ComplexPattern, list[pysb.ComplexPattern]], +) -> list[str]: """ Constructs a list of monomer names contained in complex patterns. Multiplicity of names corresponds to the stoichiometry in the complex. @@ -1377,7 +1373,7 @@ def extract_monomers( def _get_unconserved_monomers( rule: pysb.Rule, pysb_model: pysb.Model -) -> Set[str]: +) -> set[str]: """ Constructs the set of monomer names for which the specified rule changes the stoichiometry of the monomer in the specified model. 
@@ -1419,9 +1415,9 @@ def _get_unconserved_monomers( def _get_changed_stoichiometries( - reactants: Union[pysb.ComplexPattern, List[pysb.ComplexPattern]], - products: Union[pysb.ComplexPattern, List[pysb.ComplexPattern]], -) -> Set[str]: + reactants: Union[pysb.ComplexPattern, list[pysb.ComplexPattern]], + products: Union[pysb.ComplexPattern, list[pysb.ComplexPattern]], +) -> set[str]: """ Constructs the set of monomer names which have different stoichiometries in reactants and products. diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py index dd24b98cf8..8d36ad7b81 100644 --- a/python/sdist/amici/sbml_import.py +++ b/python/sdist/amici/sbml_import.py @@ -16,15 +16,10 @@ from typing import ( Any, Callable, - Dict, - Iterable, - List, Optional, - Sequence, - Set, - Tuple, Union, ) +from collections.abc import Iterable, Sequence import libsbml as sbml import numpy as np @@ -38,7 +33,6 @@ DEModel, _default_simplify, smart_is_zero_matrix, - symbol_with_assumptions, ) from .import_utils import ( RESERVED_SYMBOLS, @@ -54,18 +48,19 @@ sbml_time_symbol, smart_subs, smart_subs_dict, + symbol_with_assumptions, toposort_symbols, ) from .logging import get_logger, log_execution_time, set_log_level from .sbml_utils import SBMLException, _parse_logical_operators from .splines import AbstractSpline -SymbolicFormula = Dict[sp.Symbol, sp.Expr] +SymbolicFormula = dict[sp.Symbol, sp.Expr] default_symbols = {symbol: {} for symbol in SymbolId} -ConservationLaw = Dict[str, Union[str, sp.Expr]] +ConservationLaw = dict[str, Union[str, sp.Expr]] logger = get_logger(__name__, logging.ERROR) @@ -177,9 +172,9 @@ def __init__( self.sbml: sbml.Model = self.sbml_doc.getModel() # Long and short names for model components - self.symbols: Dict[SymbolId, Dict[sp.Symbol, Dict[str, Any]]] = {} + self.symbols: dict[SymbolId, dict[sp.Symbol, dict[str, Any]]] = {} - self._local_symbols: Dict[str, Union[sp.Expr, sp.Function]] = {} + self._local_symbols: dict[str, 
Union[sp.Expr, sp.Function]] = {} self.compartments: SymbolicFormula = {} self.compartment_assignment_rules: SymbolicFormula = {} self.species_assignment_rules: SymbolicFormula = {} @@ -279,13 +274,13 @@ def sbml2amici( self, model_name: str, output_dir: Union[str, Path] = None, - observables: Dict[str, Dict[str, str]] = None, - event_observables: Dict[str, Dict[str, str]] = None, + observables: dict[str, dict[str, str]] = None, + event_observables: dict[str, dict[str, str]] = None, constant_parameters: Iterable[str] = None, - sigmas: Dict[str, Union[str, float]] = None, - event_sigmas: Dict[str, Union[str, float]] = None, - noise_distributions: Dict[str, Union[str, Callable]] = None, - event_noise_distributions: Dict[str, Union[str, Callable]] = None, + sigmas: dict[str, Union[str, float]] = None, + event_sigmas: dict[str, Union[str, float]] = None, + noise_distributions: dict[str, Union[str, Callable]] = None, + event_noise_distributions: dict[str, Union[str, Callable]] = None, verbose: Union[int, bool] = logging.ERROR, assume_pow_positivity: bool = False, compiler: str = None, @@ -457,13 +452,13 @@ def sbml2amici( def _build_ode_model( self, - observables: Dict[str, Dict[str, str]] = None, - event_observables: Dict[str, Dict[str, str]] = None, + observables: dict[str, dict[str, str]] = None, + event_observables: dict[str, dict[str, str]] = None, constant_parameters: Iterable[str] = None, - sigmas: Dict[str, Union[str, float]] = None, - event_sigmas: Dict[str, Union[str, float]] = None, - noise_distributions: Dict[str, Union[str, Callable]] = None, - event_noise_distributions: Dict[str, Union[str, Callable]] = None, + sigmas: dict[str, Union[str, float]] = None, + event_sigmas: dict[str, Union[str, float]] = None, + noise_distributions: dict[str, Union[str, Callable]] = None, + event_noise_distributions: dict[str, Union[str, Callable]] = None, verbose: Union[int, bool] = logging.ERROR, compute_conservation_laws: bool = True, simplify: Optional[Callable] = 
_default_simplify, @@ -547,7 +542,7 @@ def _build_ode_model( @log_execution_time("importing SBML", logger) def _process_sbml( self, - constant_parameters: List[str] = None, + constant_parameters: list[str] = None, hardcode_symbols: Sequence[str] = None, ) -> None: """ @@ -1035,7 +1030,7 @@ def _process_annotations(self) -> None: @log_execution_time("processing SBML parameters", logger) def _process_parameters( self, - constant_parameters: List[str] = None, + constant_parameters: list[str] = None, hardcode_symbols: Sequence[str] = None, ) -> None: """ @@ -1165,7 +1160,7 @@ def _process_reactions(self): # accounts for possibly variable compartments. self.stoichiometric_matrix[ species["index"], reaction_index - ] += (sign * stoichiometry * species["conversion_factor"]) + ] += sign * stoichiometry * species["conversion_factor"] if reaction.isSetId(): sym_math = self._local_symbols[reaction.getId()] else: @@ -1605,9 +1600,9 @@ def get_empty_bolus_value() -> sp.Float: @log_execution_time("processing SBML observables", logger) def _process_observables( self, - observables: Union[Dict[str, Dict[str, str]], None], - sigmas: Dict[str, Union[str, float]], - noise_distributions: Dict[str, str], + observables: Union[dict[str, dict[str, str]], None], + sigmas: dict[str, Union[str, float]], + noise_distributions: dict[str, str], ) -> None: """ Perform symbolic computations required for observable and objective @@ -1671,9 +1666,9 @@ def _process_observables( @log_execution_time("processing SBML event observables", logger) def _process_event_observables( self, - event_observables: Dict[str, Dict[str, str]], - event_sigmas: Dict[str, Union[str, float]], - event_noise_distributions: Dict[str, str], + event_observables: dict[str, dict[str, str]], + event_sigmas: dict[str, Union[str, float]], + event_noise_distributions: dict[str, str], ) -> None: """ Perform symbolic computations required for observable and objective @@ -1788,8 +1783,8 @@ def _generate_default_observables(self): def 
_process_log_likelihood( self, - sigmas: Dict[str, Union[str, float]], - noise_distributions: Dict[str, str], + sigmas: dict[str, Union[str, float]], + noise_distributions: dict[str, str], events: bool = False, event_reg: bool = False, ): @@ -2017,7 +2012,7 @@ def process_conservation_laws(self, ode_model) -> None: def _get_conservation_laws_demartino( self, ode_model: DEModel, - ) -> List[Tuple[int, List[int], List[float]]]: + ) -> list[tuple[int, list[int], list[float]]]: """Identify conservation laws based on algorithm by DeMartino et al. (see conserved_moieties.py). @@ -2093,7 +2088,7 @@ def _get_conservation_laws_demartino( def _get_conservation_laws_rref( self, - ) -> List[Tuple[int, List[int], List[float]]]: + ) -> list[tuple[int, list[int], list[float]]]: """Identify conservation laws based on left nullspace of the stoichiometric matrix, computed through (numeric) Gaussian elimination @@ -2156,8 +2151,8 @@ def _get_conservation_laws_rref( return raw_cls def _add_conservation_for_non_constant_species( - self, model: DEModel, conservation_laws: List[ConservationLaw] - ) -> List[int]: + self, model: DEModel, conservation_laws: list[ConservationLaw] + ) -> list[int]: """Add non-constant species to conservation laws :param model: @@ -2360,9 +2355,9 @@ def _replace_in_all_expressions( # rule (at the end of the _process_species method), hence needs to be # processed here too. 
self.compartments = { - smart_subs(c, old, new) - if replace_identifiers - else c: smart_subs(v, old, self._make_initial(new)) + smart_subs(c, old, new) if replace_identifiers else c: smart_subs( + v, old, self._make_initial(new) + ) for c, v in self.compartments.items() } @@ -2640,8 +2635,8 @@ def assignmentRules2observables( def _add_conservation_for_constant_species( - ode_model: DEModel, conservation_laws: List[ConservationLaw] -) -> List[int]: + ode_model: DEModel, conservation_laws: list[ConservationLaw] +) -> list[int]: """ Adds constant species to conservations laws @@ -2738,7 +2733,7 @@ def get_species_initial(species: sbml.Species) -> sp.Expr: def _get_list_of_species_references( sbml_model: sbml.Model, -) -> List[sbml.SpeciesReference]: +) -> list[sbml.SpeciesReference]: """ Extracts list of species references as SBML doesn't provide a native function for this. @@ -2776,15 +2771,17 @@ def replace_logx(math_str: Union[str, float, None]) -> Union[str, float, None]: return re.sub(r"(^|\W)log(\d+)\(", r"\g<1>1/ln(\2)*ln(", math_str) -def _collect_event_assignment_parameter_targets(sbml_model: sbml.Model): - targets = set() +def _collect_event_assignment_parameter_targets( + sbml_model: sbml.Model, +) -> list[sp.Symbol]: + targets = [] sbml_parameters = sbml_model.getListOfParameters() sbml_parameter_ids = [p.getId() for p in sbml_parameters] for event in sbml_model.getListOfEvents(): for event_assignment in event.getListOfEventAssignments(): target_id = event_assignment.getVariable() if target_id in sbml_parameter_ids: - targets.add( + targets.append( _get_identifier_symbol( sbml_parameters[sbml_parameter_ids.index(target_id)] ) @@ -2811,9 +2808,9 @@ def _parse_special_functions_sbml( def _validate_observables( - observables: Union[Dict[str, Dict[str, str]], None], - sigmas: Dict[str, Union[str, float]], - noise_distributions: Dict[str, str], + observables: Union[dict[str, dict[str, str]], None], + sigmas: dict[str, Union[str, float]], + noise_distributions: 
dict[str, str], events: bool = False, ) -> None: if observables is None or not observables: @@ -2841,7 +2838,7 @@ def _validate_observables( def _check_symbol_nesting( - symbols: Dict[sp.Symbol, Dict[str, sp.Expr]], symbol_type: str + symbols: dict[sp.Symbol, dict[str, sp.Expr]], symbol_type: str ): observable_syms = set(symbols.keys()) for obs in symbols.values(): diff --git a/python/sdist/amici/sbml_utils.py b/python/sdist/amici/sbml_utils.py index 66c9d01bbc..d40610f4ab 100644 --- a/python/sdist/amici/sbml_utils.py +++ b/python/sdist/amici/sbml_utils.py @@ -11,7 +11,7 @@ import sympy as sp if TYPE_CHECKING: - from typing import Any, Dict, Optional, Tuple, Union + from typing import Any, Union SbmlID = Union[str, sp.Symbol] @@ -52,7 +52,7 @@ class SbmlAnnotationError(SBMLException): def create_sbml_model( model_id: str, level: int = 2, version: int = 5 -) -> Tuple[libsbml.SBMLDocument, libsbml.Model]: +) -> tuple[libsbml.SBMLDocument, libsbml.Model]: """Helper for creating an empty SBML model. :param model_id: @@ -116,10 +116,10 @@ def add_species( model: libsbml.Model, species_id: SbmlID, *, - compartment_id: Optional[str] = None, - name: Union[bool, str] = False, + compartment_id: str | None = None, + name: bool | str = False, initial_amount: float = 0.0, - units: Optional[str] = None, + units: str | None = None, ) -> libsbml.Species: """Helper for adding a species to a SBML model. @@ -182,10 +182,10 @@ def add_parameter( model: libsbml.Model, parameter_id: SbmlID, *, - name: Union[bool, str] = False, - value: Optional[float] = None, - units: Optional[str] = None, - constant: Optional[bool] = None, + name: bool | str = False, + value: float | None = None, + units: str | None = None, + constant: bool | None = None, ) -> libsbml.Parameter: """Helper for adding a parameter to a SBML model. 
@@ -239,7 +239,7 @@ def add_assignment_rule( model: libsbml.Model, variable_id: SbmlID, formula, - rule_id: Optional[str] = None, + rule_id: str | None = None, ) -> libsbml.AssignmentRule: """Helper for adding an assignment rule to a SBML model. @@ -287,7 +287,7 @@ def add_rate_rule( model: libsbml.Model, variable_id: SbmlID, formula, - rule_id: Optional[str] = None, + rule_id: str | None = None, ) -> libsbml.RateRule: """ Helper for adding a rate rule to a SBML model. @@ -337,7 +337,7 @@ def add_inflow( species_id: SbmlID, rate, *, - reaction_id: Optional[str] = None, + reaction_id: str | None = None, reversible: bool = False, ) -> libsbml.Reaction: species_id = str(species_id) @@ -364,9 +364,7 @@ def add_inflow( return reaction -def get_sbml_units( - model: libsbml.Model, x: Union[SbmlID, sp.Basic] -) -> Union[None, str]: +def get_sbml_units(model: libsbml.Model, x: SbmlID | sp.Basic) -> None | str: """Try to get the units for expression `x`. :param model: @@ -493,7 +491,7 @@ def mathml2sympy( mathml: str, *, evaluate: bool = False, - locals: Optional[Dict[str, Any]] = None, + locals: dict[str, Any] | None = None, expression_type: str = "mathml2sympy", ) -> sp.Basic: ast = libsbml.readMathMLFromString(mathml) @@ -519,8 +517,8 @@ def mathml2sympy( def _parse_logical_operators( - math_str: Union[str, float, None] -) -> Union[str, float, None]: + math_str: str | float | None, +) -> str | float | None: """ Parses a math string in order to replace logical operators by a form parsable for sympy diff --git a/python/sdist/amici/setup.template.py b/python/sdist/amici/setup.template.py index e7995e2c52..599c4df49b 100644 --- a/python/sdist/amici/setup.template.py +++ b/python/sdist/amici/setup.template.py @@ -1,5 +1,6 @@ """AMICI model package setup""" import os +import sys from pathlib import Path from amici import _get_amici_path @@ -33,6 +34,7 @@ def get_extension() -> CMakeExtension: f"{prefix_path.as_posix()}/lib64/cmake/SuiteSparse", 
f"-DKLU_ROOT={prefix_path.as_posix()}", "-DAMICI_PYTHON_BUILD_EXT_ONLY=ON", + f"-DPython3_EXECUTABLE={Path(sys.executable).as_posix()}", ], ) diff --git a/python/sdist/amici/splines.py b/python/sdist/amici/splines.py index fdb0912045..d55a78137b 100644 --- a/python/sdist/amici/splines.py +++ b/python/sdist/amici/splines.py @@ -14,20 +14,15 @@ from typing import ( Any, Callable, - Dict, - List, - Optional, - Sequence, - Set, - Tuple, Union, ) + from collections.abc import Sequence from . import sbml_import - BClike = Union[None, str, Tuple[Union[None, str], Union[None, str]]] + BClike = Union[None, str, tuple[Union[None, str], Union[None, str]]] - NormalizedBC = Tuple[Union[None, str], Union[None, str]] + NormalizedBC = tuple[Union[None, str], Union[None, str]] import collections.abc import logging @@ -88,11 +83,11 @@ class UniformGrid(collections.abc.Sequence): def __init__( self, - start: Union[Real, sp.Basic], - stop: Union[Real, sp.Basic], - step: Optional[Union[Real, sp.Basic]] = None, + start: Real | sp.Basic, + stop: Real | sp.Basic, + step: Real | sp.Basic | None = None, *, - number_of_nodes: Optional[Integral] = None, + number_of_nodes: Integral | None = None, always_include_stop: bool = True, ): """Create a new ``UniformGrid``. 
@@ -219,11 +214,11 @@ class AbstractSpline(ABC): def __init__( self, - sbml_id: Union[str, sp.Symbol], + sbml_id: str | sp.Symbol, nodes: Sequence, values_at_nodes: Sequence, *, - evaluate_at: Optional[Union[str, sp.Basic]] = None, + evaluate_at: str | sp.Basic | None = None, bc: BClike = None, extrapolate: BClike = None, logarithmic_parametrization: bool = False, @@ -418,7 +413,7 @@ def _normalize_bc(bc: BClike) -> NormalizedBC: def _normalize_extrapolate( self, bc: NormalizedBC, extrapolate: BClike - ) -> Tuple[NormalizedBC, NormalizedBC]: + ) -> tuple[NormalizedBC, NormalizedBC]: """ Preprocess `extrapolate` to a standard form and perform consistency checks @@ -576,10 +571,10 @@ def check_if_valid(self, importer: sbml_import.SbmlImporter) -> None: # until (if at all) they are accounted for. from .de_export import SymbolId - fixed_parameters: List[sp.Symbol] = list( + fixed_parameters: list[sp.Symbol] = list( importer.symbols[SymbolId.FIXED_PARAMETER].keys() ) - species: List[sp.Symbol] = list( + species: list[sp.Symbol] = list( importer.symbols[SymbolId.SPECIES].keys() ) @@ -606,9 +601,7 @@ def check_if_valid(self, importer: sbml_import.SbmlImporter) -> None: if not np.all(np.diff(nodes_values) >= 0): raise ValueError("nodes should be strictly increasing!") - def poly( - self, i: Integral, *, x: Union[Real, sp.Basic] = None - ) -> sp.Basic: + def poly(self, i: Integral, *, x: Real | sp.Basic = None) -> sp.Basic: """ Get the polynomial interpolant on the ``(nodes[i], nodes[i+1])`` interval. The polynomial is written in Horner form with respect to the scaled @@ -646,7 +639,7 @@ def poly( with evaluate(False): return poly.subs(t, t_value) - def poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic: + def poly_variable(self, x: Real | sp.Basic, i: Integral) -> sp.Basic: """ Given an evaluation point, return the value of the variable in which the polynomial on the ``i``-th interval is expressed. 
@@ -656,15 +649,13 @@ def poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic: return self._poly_variable(x, i) @abstractmethod - def _poly_variable( - self, x: Union[Real, sp.Basic], i: Integral - ) -> sp.Basic: + def _poly_variable(self, x: Real | sp.Basic, i: Integral) -> sp.Basic: """This function (and not poly_variable) should be implemented by the subclasses""" raise NotImplementedError() @abstractmethod - def _poly(self, t: Union[Real, sp.Basic], i: Integral) -> sp.Basic: + def _poly(self, t: Real | sp.Basic, i: Integral) -> sp.Basic: """ Return the symbolic expression for the spline restricted to the `i`-th interval as a polynomial in the scaled variable `t`. @@ -672,7 +663,7 @@ def _poly(self, t: Union[Real, sp.Basic], i: Integral) -> sp.Basic: raise NotImplementedError() def segment_formula( - self, i: Integral, *, x: Union[Real, sp.Basic] = None + self, i: Integral, *, x: Real | sp.Basic = None ) -> sp.Basic: """ Return the formula for the actual value of the spline expression @@ -700,7 +691,7 @@ def y_scaled(self, i: Integral): @property def extrapolation_formulas( self, - ) -> Tuple[Union[None, sp.Basic], Union[None, sp.Basic]]: + ) -> tuple[None | sp.Basic, None | sp.Basic]: """ Returns the extrapolation formulas on the left and right side of the interval ``(nodes[0], nodes[-1])``. 
@@ -710,9 +701,9 @@ def extrapolation_formulas( def _extrapolation_formulas( self, - x: Union[Real, sp.Basic], - extrapolate: Optional[NormalizedBC] = None, - ) -> Tuple[Union[None, sp.Expr], Union[None, sp.Expr]]: + x: Real | sp.Basic, + extrapolate: NormalizedBC | None = None, + ) -> tuple[None | sp.Expr, None | sp.Expr]: if extrapolate is None: extr_left, extr_right = self.extrapolate else: @@ -770,7 +761,7 @@ def mathml_formula(self) -> sp.Piecewise: def _formula( self, *, - x: Union[Real, sp.Basic] = None, + x: Real | sp.Basic = None, sbml_syms: bool = False, sbml_ops: bool = False, cache: bool = True, @@ -845,15 +836,15 @@ def _formula( return formula @property - def period(self) -> Union[sp.Basic, None]: + def period(self) -> sp.Basic | None: """Period of a periodic spline. `None` if the spline is not periodic.""" if self.bc == ("periodic", "periodic"): return self.nodes[-1] - self.nodes[0] return None def _to_base_interval( - self, x: Union[Real, sp.Basic], *, with_interval_number: bool = False - ) -> Union[sp.Basic, Tuple[sp.core.numbers.Integer, sp.Basic]]: + self, x: Real | sp.Basic, *, with_interval_number: bool = False + ) -> sp.Basic | tuple[sp.core.numbers.Integer, sp.Basic]: """For periodic splines, maps the real point `x` to the reference period.""" if self.bc != ("periodic", "periodic"): @@ -874,19 +865,19 @@ def _to_base_interval( return k, z return z - def evaluate(self, x: Union[Real, sp.Basic]) -> sp.Basic: + def evaluate(self, x: Real | sp.Basic) -> sp.Basic: """Evaluate the spline at the point `x`.""" _x = sp.Dummy("x") return self._formula(x=_x, cache=False).subs(_x, x) - def derivative(self, x: Union[Real, sp.Basic], **kwargs) -> sp.Expr: + def derivative(self, x: Real | sp.Basic, **kwargs) -> sp.Expr: """Evaluate the spline derivative at the point `x`.""" # NB kwargs are used to pass on extrapolate=None # when called from .extrapolation_formulas() _x = sp.Dummy("x") return self._formula(x=_x, cache=False, **kwargs).diff(_x).subs(_x, x) - 
def second_derivative(self, x: Union[Real, sp.Basic]) -> sp.Basic: + def second_derivative(self, x: Real | sp.Basic) -> sp.Basic: """Evaluate the spline second derivative at the point `x`.""" _x = sp.Dummy("x") return self._formula(x=_x, cache=False).diff(_x).diff(_x).subs(_x, x) @@ -907,9 +898,7 @@ def squared_L2_norm_of_curvature(self) -> sp.Basic: ) return sp.simplify(integral) - def integrate( - self, x0: Union[Real, sp.Basic], x1: Union[Real, sp.Basic] - ) -> sp.Basic: + def integrate(self, x0: Real | sp.Basic, x1: Real | sp.Basic) -> sp.Basic: """Integrate the spline between the points `x0` and `x1`.""" x = sp.Dummy("x") x0, x1 = sp.sympify((x0, x1)) @@ -969,7 +958,7 @@ def amici_annotation(self) -> str: # Check XML and prettify return pretty_xml(annotation) - def _annotation_attributes(self) -> Dict[str, Any]: + def _annotation_attributes(self) -> dict[str, Any]: attributes = {"spline_method": self.method} if self.bc[0] == self.bc[1]: @@ -996,7 +985,7 @@ def _annotation_attributes(self) -> Dict[str, Any]: return attributes - def _annotation_children(self) -> Dict[str, Union[str, List[str]]]: + def _annotation_children(self) -> dict[str, str | list[str]]: children = {} with evaluate(False): @@ -1024,12 +1013,12 @@ def add_to_sbml_model( self, model: libsbml.Model, *, - auto_add: Union[bool, str] = False, - x_nominal: Optional[Sequence[float]] = None, - y_nominal: Optional[Union[Sequence[float], float]] = None, - x_units: Optional[str] = None, - y_units: Optional[str] = None, - y_constant: Optional[Union[Sequence[bool], bool]] = None, + auto_add: bool | str = False, + x_nominal: Sequence[float] | None = None, + y_nominal: Sequence[float] | float | None = None, + x_units: str | None = None, + y_units: str | None = None, + y_constant: Sequence[bool] | bool | None = None, ) -> None: """ Function to add the spline to an SBML model using an assignment rule @@ -1196,7 +1185,7 @@ def is_spline(rule: libsbml.AssignmentRule) -> bool: @staticmethod def get_annotation( 
rule: libsbml.AssignmentRule, - ) -> Union[ET.Element, None]: + ) -> ET.Element | None: """ Extract AMICI spline annotation from an SBML assignment rule (given as a :py:class:`libsbml.AssignmentRule` object). @@ -1216,7 +1205,7 @@ def from_annotation( sbml_id: sp.Symbol, annotation: ET.Element, *, - locals_: Dict[str, Any], + locals_: dict[str, Any], ) -> AbstractSpline: """Create a spline object from a SBML annotation. @@ -1289,9 +1278,9 @@ def from_annotation( @classmethod def _from_annotation( cls, - attributes: Dict[str, Any], - children: Dict[str, List[sp.Basic]], - ) -> Dict[str, Any]: + attributes: dict[str, Any], + children: dict[str, list[sp.Basic]], + ) -> dict[str, Any]: """ Given the attributes and children of a AMICI spline annotation, returns the keyword arguments to be passed @@ -1354,7 +1343,7 @@ def _from_annotation( return kwargs - def parameters(self, importer: sbml_import.SbmlImporter) -> Set[sp.Symbol]: + def parameters(self, importer: sbml_import.SbmlImporter) -> set[sp.Symbol]: """Returns the SBML parameters used by this spline""" from .de_export import SymbolId @@ -1362,7 +1351,7 @@ def parameters(self, importer: sbml_import.SbmlImporter) -> Set[sp.Symbol]: set(importer.symbols[SymbolId.PARAMETER].keys()) ) - def _parameters(self) -> Set[sp.Symbol]: + def _parameters(self) -> set[sp.Symbol]: parameters = set() for y in self.values_at_nodes: parameters.update(y.free_symbols) @@ -1451,12 +1440,12 @@ def _eval_is_real(self): def plot( self, - parameters: Optional[Dict] = None, + parameters: dict | None = None, *, - xlim: Optional[Tuple[float, float]] = None, + xlim: tuple[float, float] | None = None, npoints: int = 100, - xlabel: Optional[str] = None, - ylabel: Union[str, None] = "spline value", + xlabel: str | None = None, + ylabel: str | None = "spline value", ax=None, ): "Plots the spline, highlighting the nodes positions." 
@@ -1486,9 +1475,9 @@ def plot( def spline_user_functions( - splines: List[AbstractSpline], - p_index: Dict[sp.Symbol, int], -) -> Dict[str, List[Tuple[Callable[..., bool], Callable[..., str]]]]: + splines: list[AbstractSpline], + p_index: dict[sp.Symbol, int], +) -> dict[str, list[tuple[Callable[..., bool], Callable[..., str]]]]: """ Custom user functions to be used in `ODEExporter` for linking spline expressions to C++ code. @@ -1510,7 +1499,10 @@ def spline_user_functions( "AmiciSplineSensitivity": [ ( lambda *args: True, - lambda spline_id, x, param_id, *p: f"sspl_{spline_ids.index(spline_id)}_{p_index[param_id]}", + lambda spline_id, + x, + param_id, + *p: f"sspl_{spline_ids.index(spline_id)}_{p_index[param_id]}", ) ], } @@ -1519,12 +1511,12 @@ def spline_user_functions( class CubicHermiteSpline(AbstractSpline): def __init__( self, - sbml_id: Union[str, sp.Symbol], + sbml_id: str | sp.Symbol, nodes: Sequence, values_at_nodes: Sequence, derivatives_at_nodes: Sequence = None, *, - evaluate_at: Optional[Union[str, sp.Basic]] = None, + evaluate_at: str | sp.Basic | None = None, bc: BClike = "auto", extrapolate: BClike = None, logarithmic_parametrization: bool = False, @@ -1669,7 +1661,7 @@ def check_if_valid(self, importer: sbml_import.SbmlImporter) -> None: # TODO this is very much a draft from .de_export import SymbolId - species: List[sp.Symbol] = list(importer.symbols[SymbolId.SPECIES]) + species: list[sp.Symbol] = list(importer.symbols[SymbolId.SPECIES]) for d in self.derivatives_at_nodes: if len(d.free_symbols.intersection(species)) != 0: raise ValueError( @@ -1688,15 +1680,13 @@ def d_scaled(self, i: Integral) -> sp.Expr: return self.derivatives_at_nodes[i] / self.values_at_nodes[i] return self.derivatives_at_nodes[i] - def _poly_variable( - self, x: Union[Real, sp.Basic], i: Integral - ) -> sp.Basic: + def _poly_variable(self, x: Real | sp.Basic, i: Integral) -> sp.Basic: assert 0 <= i < len(self.nodes) - 1 dx = self.nodes[i + 1] - self.nodes[i] with 
evaluate(False): return (x - self.nodes[i]) / dx - def _poly(self, t: Union[Real, sp.Basic], i: Integral) -> sp.Basic: + def _poly(self, t: Real | sp.Basic, i: Integral) -> sp.Basic: """ Return the symbolic expression for the spline restricted to the `i`-th interval as polynomial in the scaled variable `t`. @@ -1718,7 +1708,7 @@ def _poly(self, t: Union[Real, sp.Basic], i: Integral) -> sp.Basic: with evaluate(False): return h00 * y0 + h10 * dx * dy0 + h01 * y1 + h11 * dx * dy1 - def _annotation_children(self) -> Dict[str, Union[str, List[str]]]: + def _annotation_children(self) -> dict[str, str | list[str]]: children = super()._annotation_children() if not self._derivatives_by_fd: children["spline_derivatives"] = [ @@ -1726,7 +1716,7 @@ def _annotation_children(self) -> Dict[str, Union[str, List[str]]]: ] return children - def _parameters(self) -> Set[sp.Symbol]: + def _parameters(self) -> set[sp.Symbol]: parameters = super()._parameters() for d in self.derivatives_at_nodes: parameters.update(d.free_symbols) @@ -1741,7 +1731,7 @@ def _replace_in_all_expressions( ] @classmethod - def _from_annotation(cls, attributes, children) -> Dict[str, Any]: + def _from_annotation(cls, attributes, children) -> dict[str, Any]: kwargs = super()._from_annotation(attributes, children) if "spline_derivatives" in children.keys(): diff --git a/python/sdist/amici/swig.py b/python/sdist/amici/swig.py index ef75646389..902145ff3e 100644 --- a/python/sdist/amici/swig.py +++ b/python/sdist/amici/swig.py @@ -97,9 +97,9 @@ def _new_annot(self, old_annot: str): ) in self.mapping: value_type_annot = self.mapping[value_type] if isinstance(value_type_annot, ast.Constant): - return ast.Name(f"Tuple['{value_type_annot.value}']") + return ast.Name(f"tuple['{value_type_annot.value}']") if isinstance(value_type_annot, ast.Name): - return ast.Name(f"Tuple[{value_type_annot.id}]") + return ast.Name(f"tuple[{value_type_annot.id}]") return ast.Constant(old_annot) @@ -111,7 +111,7 @@ def 
fix_typehints(infilename, outfilename): return # file -> AST - with open(infilename, "r") as f: + with open(infilename) as f: source = f.read() parsed_source = ast.parse(source) diff --git a/python/sdist/amici/swig_wrappers.py b/python/sdist/amici/swig_wrappers.py index f56f3bd5d2..e16d4c2580 100644 --- a/python/sdist/amici/swig_wrappers.py +++ b/python/sdist/amici/swig_wrappers.py @@ -3,7 +3,8 @@ import sys import warnings from contextlib import contextmanager, suppress -from typing import Any, Dict, List, Optional, Sequence, Union +from typing import Any, Optional, Union +from collections.abc import Sequence import amici import amici.amici as amici_swig @@ -54,7 +55,7 @@ def _capture_cstdout(): def _get_ptr( - obj: Union[AmiciModel, AmiciExpData, AmiciSolver, AmiciReturnData] + obj: Union[AmiciModel, AmiciExpData, AmiciSolver, AmiciReturnData], ) -> Union[ "amici_swig.Model", "amici_swig.ExpData", @@ -154,7 +155,7 @@ def runAmiciSimulations( edata_list: AmiciExpDataVector, failfast: bool = True, num_threads: int = 1, -) -> List["numpy.ReturnDataView"]: +) -> list["numpy.ReturnDataView"]: """ Convenience wrapper for loops of amici.runAmiciSimulation @@ -254,7 +255,7 @@ def writeSolverSettingsToHDF5( def get_model_settings( model: AmiciModel, -) -> Dict[str, Any]: +) -> dict[str, Any]: """Get model settings that are set independently of the compiled model. :param model: The AMICI model instance. @@ -285,7 +286,7 @@ def get_model_settings( def set_model_settings( model: AmiciModel, - settings: Dict[str, Any], + settings: dict[str, Any], ) -> None: """Set model settings. diff --git a/python/sdist/amici/testing.py b/python/sdist/amici/testing.py index cdee80b1f0..8d4a73fbe1 100644 --- a/python/sdist/amici/testing.py +++ b/python/sdist/amici/testing.py @@ -32,7 +32,19 @@ class TemporaryDirectoryWinSafe(TemporaryDirectory): such failures. 
""" + def __init__(self, *args, delete=True, **kwargs): + super().__init__(*args, **kwargs) + # TODO Python3.12 TemporaryDirectory already has a delete argument + # remove this once we drop support for Python3.11 + self.delete = delete + + if not self.delete: + self._finalizer.detach() + def cleanup(self): + if not self.delete: + return + try: super().cleanup() except PermissionError as e: diff --git a/python/sdist/pyproject.toml b/python/sdist/pyproject.toml index 011064fbdb..91b8484af6 100644 --- a/python/sdist/pyproject.toml +++ b/python/sdist/pyproject.toml @@ -15,3 +15,8 @@ build-backend = "setuptools.build_meta" [tool.black] line-length = 79 + +[tool.ruff] +line-length = 79 +ignore = ["E402", "F403", "F405", "E741"] +extend-include = ["*.ipynb"] diff --git a/python/sdist/setup.cfg b/python/sdist/setup.cfg index f419578754..bf0e61cdd5 100644 --- a/python/sdist/setup.cfg +++ b/python/sdist/setup.cfg @@ -34,7 +34,6 @@ install_requires = numpy>=1.23.2; python_version=='3.11' numpy; python_version>='3.12' python-libsbml - h5py pandas>=2.0.2 wurlitzer toposort @@ -47,15 +46,24 @@ zip_safe = False petab = petab>=0.2.1 pysb = pysb>=1.13.1 test = + benchmark_models_petab @ git+https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git@master#subdirectory=src/python + h5py pytest pytest-cov pytest-rerunfailures coverage shyaml - antimony + antimony>=2.13 + # see https://github.com/sys-bio/antimony/issues/92 + # unsupported x86_64 / x86_64h + antimony!=2.14; platform_system=='Darwin' and platform_machine in 'x86_64h' + scipy vis = matplotlib seaborn +examples = + jupyter + scipy [options.package_data] amici = @@ -74,5 +82,5 @@ amici = ; amici_import_petab.py is kept for backwards compatibility console_scripts = - amici_import_petab = amici.petab_import:_main - amici_import_petab.py = amici.petab_import:_main + amici_import_petab = amici.petab.cli.import_petab:_main + amici_import_petab.py = amici.petab.cli.import_petab:_main diff --git a/python/sdist/setup.py 
b/python/sdist/setup.py index 4d65634cc2..ed65127f7a 100755 --- a/python/sdist/setup.py +++ b/python/sdist/setup.py @@ -146,7 +146,6 @@ def main(): # (https://pypi.org/project/amici/) with open( os.path.join(os.path.dirname(__file__), "README.md"), - "r", encoding="utf-8", ) as fh: long_description = fh.read() diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 9ab64b91d7..1da7cb31b3 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -2,7 +2,6 @@ import copy import importlib import os -import shutil import sys import amici diff --git a/python/tests/petab/test_petab_problem.py b/python/tests/petab/test_petab_problem.py new file mode 100644 index 0000000000..5a8a299bb9 --- /dev/null +++ b/python/tests/petab/test_petab_problem.py @@ -0,0 +1,87 @@ +from amici.petab.petab_problem import PetabProblem +from benchmark_models_petab import get_problem +from amici.testing import skip_on_valgrind + + +@skip_on_valgrind +def test_amici_petab_problem_pregenerate(): + """PetabProblem with pre-generated ExpDatas""" + # any example is fine - the only assumption is that we don't have + # preequilibration + petab_problem = get_problem("Boehm_JProteomeRes2014") + app = PetabProblem(petab_problem, store_edatas=True) + + # ensure all edatas are generated upon construction + assert len(app._edatas) == len( + petab_problem.get_simulation_conditions_from_measurement_df() + ) + + # ensure the cached edatas are returned + for i, (_, condition) in enumerate( + petab_problem.get_simulation_conditions_from_measurement_df().iterrows() + ): + assert app.get_edata(condition.simulationConditionId) is app._edatas[i] + + # ensure parameter are updated + edatas = app.get_edatas() + app.set_parameters( + {app.model.getParameterIds()[0]: 0.12345}, scaled_parameters=True + ) + for edata in edatas: + assert edata.parameters[0] == 0.12345 + + +@skip_on_valgrind +def test_amici_petab_problem_on_demand(): + """PetabProblem with on-demand ExpDatas""" + # any example is 
fine - the only assumption is that we don't have + # preequilibration + petab_problem = get_problem("Boehm_JProteomeRes2014") + app = PetabProblem(petab_problem, store_edatas=False) + + # ensure no edatas are generated upon construction + assert not app._edatas + + edatas = app.get_edatas() + assert len(edatas) == len( + petab_problem.get_simulation_conditions_from_measurement_df() + ) + + # ensure parameter are updated + app.set_parameters( + {app.model.getParameterIds()[0]: 0.12345}, scaled_parameters=True + ) + # previously generated ExpDatas are not updated + for edata in edatas: + assert edata.parameters[0] != 0.12345 + # but newly generated ExpDatas are + for edata in app.get_edatas(): + assert edata.parameters[0] == 0.12345 + + some_sim_condition = ( + petab_problem.measurement_df.simulationConditionId.iloc[0] + ) + # different objects for subsequent calls + assert app.get_edata(some_sim_condition) is not app.get_edata( + some_sim_condition + ) + + +@skip_on_valgrind +def test_amici_petab_problem_pregenerate_equals_on_demand(): + """Check that PetabProblem produces the same ExpDatas + independent of the `store_edatas` parameter.""" + # any example is fine + petab_problem = get_problem("Boehm_JProteomeRes2014") + app_store_true = PetabProblem(petab_problem, store_edatas=True) + app_store_false = PetabProblem(petab_problem, store_edatas=False) + + parameter_update = {app_store_true.model.getParameterIds()[0]: 0.12345} + app_store_true.set_parameters(parameter_update, scaled_parameters=True) + app_store_false.set_parameters(parameter_update, scaled_parameters=True) + + for edata_store_true, edata_store_false in zip( + app_store_true.get_edatas(), app_store_false.get_edatas() + ): + assert edata_store_true is not edata_store_false + assert edata_store_true == edata_store_false diff --git a/python/tests/petab_test_problems/lotka_volterra/model/writer.py b/python/tests/petab_test_problems/lotka_volterra/model/writer.py index 76b98c3deb..c6abebaebe 100644 --- 
a/python/tests/petab_test_problems/lotka_volterra/model/writer.py +++ b/python/tests/petab_test_problems/lotka_volterra/model/writer.py @@ -1,6 +1,5 @@ from pathlib import Path -import petab import yaml2sbml yaml2sbml_yaml = "lotka_volterra.yaml" diff --git a/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py b/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py index 767c239c5d..1a39d4a846 100644 --- a/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py +++ b/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py @@ -3,7 +3,6 @@ http://bionetgen.org/index.php/Egfr_simple """ -from __future__ import print_function from pysb import * diff --git a/python/tests/splines_utils.py b/python/tests/splines_utils.py index 0746207ddb..29024d3b73 100644 --- a/python/tests/splines_utils.py +++ b/python/tests/splines_utils.py @@ -8,7 +8,8 @@ import os import uuid from tempfile import mkdtemp -from typing import Any, Dict, List, Optional, Sequence, Union +from typing import Any, Optional, Union +from collections.abc import Sequence import amici import numpy as np @@ -16,8 +17,8 @@ import petab import sympy as sp from amici.gradient_check import _check_results -from amici.petab_import import import_petab_problem -from amici.petab_objective import EDATAS, LLH, RDATAS, SLLH, simulate_petab +from amici.petab.petab_import import import_petab_problem +from amici.petab.simulations import EDATAS, LLH, RDATAS, SLLH, simulate_petab from amici.sbml_utils import ( add_compartment, add_inflow, @@ -44,7 +45,7 @@ def evaluate_spline( def integrate_spline( spline: AbstractSpline, - params: Union[Dict, None], + params: Union[dict, None], tt: Sequence[float], initial_value: float = 0, ): @@ -116,8 +117,8 @@ def species_to_index(name) -> int: def create_petab_problem( - splines: List[AbstractSpline], - params_true: Dict, + splines: list[AbstractSpline], + params_true: dict, initial_values: Optional[np.ndarray] = None, use_reactions: 
bool = False, measure_upsample: int = 6, @@ -514,7 +515,7 @@ def check_splines( w_atol: float = 1e-11, sx_rtol: float = 1e-10, sx_atol: float = 1e-10, - groundtruth: Optional[Union[str, Dict[str, Any]]] = None, + groundtruth: Optional[Union[str, dict[str, Any]]] = None, **kwargs, ): """ diff --git a/python/tests/test_antimony_import.py b/python/tests/test_antimony_import.py index 41af014aa9..44f8296c29 100644 --- a/python/tests/test_antimony_import.py +++ b/python/tests/test_antimony_import.py @@ -2,8 +2,10 @@ import numpy as np from amici.antimony_import import antimony2amici from amici.testing import TemporaryDirectoryWinSafe as TemporaryDirectory +from amici.testing import skip_on_valgrind +@skip_on_valgrind def test_antimony_example(): """If this example requires changes, please also update documentation/python_interface.rst.""" ant_model = """ diff --git a/python/tests/test_compare_conservation_laws_sbml.py b/python/tests/test_compare_conservation_laws_sbml.py index 4d6a453b52..640d2dd988 100644 --- a/python/tests/test_compare_conservation_laws_sbml.py +++ b/python/tests/test_compare_conservation_laws_sbml.py @@ -5,6 +5,7 @@ import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal +from amici.testing import skip_on_valgrind @pytest.fixture @@ -123,6 +124,7 @@ def get_results( return amici.runAmiciSimulation(model, solver, edata) +@skip_on_valgrind def test_compare_conservation_laws_sbml(models, edata_fixture): # first, create the model model_with_cl, model_without_cl = models @@ -288,6 +290,7 @@ def test_adjoint_pre_and_post_equilibration(models, edata_fixture): assert_allclose(raa_cl["sllh"], raa["sllh"], 1e-5, 1e-5) +@skip_on_valgrind def test_get_set_model_settings(models): """test amici.(get|set)_model_settings cycles for models with and without conservation laws""" diff --git a/python/tests/test_conserved_quantities_rref.py b/python/tests/test_conserved_quantities_rref.py index ada4b46729..7a131ebee3 100644 --- 
a/python/tests/test_conserved_quantities_rref.py +++ b/python/tests/test_conserved_quantities_rref.py @@ -1,5 +1,3 @@ -import os - import numpy as np import pytest import sympy as sp diff --git a/python/tests/test_cxxcodeprinter.py b/python/tests/test_cxxcodeprinter.py index 384b8ad9ae..3f92a5495c 100644 --- a/python/tests/test_cxxcodeprinter.py +++ b/python/tests/test_cxxcodeprinter.py @@ -1,8 +1,10 @@ import sympy as sp from amici.cxxcodeprinter import AmiciCxxCodePrinter from sympy.codegen.rewriting import optims_c99 +from amici.testing import skip_on_valgrind +@skip_on_valgrind def test_optimizations(): """Check that AmiciCxxCodePrinter handles optimizations correctly.""" try: diff --git a/python/tests/test_de_model.py b/python/tests/test_de_model.py new file mode 100644 index 0000000000..8bd750443a --- /dev/null +++ b/python/tests/test_de_model.py @@ -0,0 +1,37 @@ +import sympy as sp +from amici.de_model import Event +from amici.import_utils import amici_time_symbol +from amici.testing import skip_on_valgrind + + +@skip_on_valgrind +def test_event_trigger_time(): + e = Event( + sp.Symbol("event1"), "event name", amici_time_symbol - 10, sp.Float(0) + ) + assert e.triggers_at_fixed_timepoint() is True + assert e.get_trigger_time() == 10 + + # fixed, but multiple timepoints - not (yet) supported + e = Event( + sp.Symbol("event1"), + "event name", + sp.sin(amici_time_symbol), + sp.Float(0), + ) + assert e.triggers_at_fixed_timepoint() is False + + e = Event( + sp.Symbol("event1"), "event name", amici_time_symbol / 2, sp.Float(0) + ) + assert e.triggers_at_fixed_timepoint() is True + assert e.get_trigger_time() == 0 + + # parameter-dependent triggers - not (yet) supported + e = Event( + sp.Symbol("event1"), + "event name", + amici_time_symbol - sp.Symbol("delay"), + sp.Float(0), + ) + assert e.triggers_at_fixed_timepoint() is False diff --git a/python/tests/test_edata.py b/python/tests/test_edata.py index 9c4d9b9edc..27c67de61e 100644 --- 
a/python/tests/test_edata.py +++ b/python/tests/test_edata.py @@ -2,11 +2,11 @@ import amici import numpy as np from amici.testing import skip_on_valgrind -from test_sbml_import import model_units_module +from test_sbml_import import model_units_module # noqa: F401 @skip_on_valgrind -def test_edata_sensi_unscaling(model_units_module): +def test_edata_sensi_unscaling(model_units_module): # noqa: F811 """ ExpData parameters should be used for unscaling initial state sensitivities. diff --git a/python/tests/test_events.py b/python/tests/test_events.py index d2a177bded..065cdeb126 100644 --- a/python/tests/test_events.py +++ b/python/tests/test_events.py @@ -1,8 +1,12 @@ """Tests for SBML events, including piecewise expressions.""" from copy import deepcopy +import amici import numpy as np import pytest +from amici.antimony_import import antimony2amici +from amici.gradient_check import check_derivatives +from amici.testing import TemporaryDirectoryWinSafe as TemporaryDirectory from amici.testing import skip_on_valgrind from util import ( check_trajectories_with_forward_sensitivities, @@ -704,3 +708,40 @@ def expm(x): from mpmath import expm return np.array(expm(x).tolist()).astype(float) + + +def test_handling_of_fixed_time_point_event_triggers(): + """Test handling of events without solver-tracked root functions.""" + ant_model = """ + model test_events_time_based + event_target = 0 + bolus = 1 + at (time > 1): event_target = 1 + at (time > 2): event_target = event_target + bolus + at (time > 3): event_target = 3 + end + """ + module_name = "test_events_time_based" + with TemporaryDirectory(prefix=module_name, delete=False) as outdir: + antimony2amici( + ant_model, + model_name=module_name, + output_dir=outdir, + verbose=True, + ) + model_module = amici.import_model_module( + module_name=module_name, module_path=outdir + ) + amici_model = model_module.getModel() + assert amici_model.ne == 3 + assert amici_model.ne_solver == 0 + amici_model.setTimepoints(np.linspace(0, 
4, 200)) + amici_solver = amici_model.getSolver() + rdata = amici.runAmiciSimulation(amici_model, amici_solver) + assert rdata.status == amici.AMICI_SUCCESS + assert (rdata.x[rdata.ts < 1] == 0).all() + assert (rdata.x[(rdata.ts >= 1) & (rdata.ts < 2)] == 1).all() + assert (rdata.x[(rdata.ts >= 2) & (rdata.ts < 3)] == 2).all() + assert (rdata.x[(rdata.ts >= 3)] == 3).all() + + check_derivatives(amici_model, amici_solver, edata=None) diff --git a/python/tests/test_misc.py b/python/tests/test_misc.py index 5a88fda6f8..24bba79888 100644 --- a/python/tests/test_misc.py +++ b/python/tests/test_misc.py @@ -78,8 +78,7 @@ def test_cmake_compilation(sbml_example_presimulation_module): cmd, shell=True, check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + capture_output=True, ) except subprocess.CalledProcessError as e: print(e.stdout.decode()) diff --git a/python/tests/test_ode_export.py b/python/tests/test_ode_export.py index b30d451a4a..f34d78892d 100644 --- a/python/tests/test_ode_export.py +++ b/python/tests/test_ode_export.py @@ -93,6 +93,7 @@ def test_csc_matrix_vector(): assert str(sparse_matrix) == "Matrix([[0], [da2_db_1]])" +@skip_on_valgrind def test_match_deriv(): from amici.de_export import DERIVATIVE_PATTERN as pat diff --git a/python/tests/test_pandas.py b/python/tests/test_pandas.py index 21c58bcaff..40799120ad 100644 --- a/python/tests/test_pandas.py +++ b/python/tests/test_pandas.py @@ -5,6 +5,8 @@ import amici import numpy as np import pytest +from amici.testing import skip_on_valgrind + # test parameters for test_pandas_import_export combos = itertools.product([(10, 5), (5, 10), ()], repeat=3) @@ -18,6 +20,7 @@ ] +@skip_on_valgrind @pytest.mark.parametrize("case", cases) def test_pandas_import_export(sbml_example_presimulation_module, case): """TestCase class for testing csv import using pandas""" diff --git a/python/tests/test_parameter_mapping.py b/python/tests/test_parameter_mapping.py index ae66e23f53..0138d80932 100644 --- 
a/python/tests/test_parameter_mapping.py +++ b/python/tests/test_parameter_mapping.py @@ -1,8 +1,6 @@ """Test for ``amici.parameter_mapping``""" -import os -import pytest -from amici.parameter_mapping import ( +from amici.petab.parameter_mapping import ( ParameterMapping, ParameterMappingForCondition, ) diff --git a/python/tests/test_petab_import.py b/python/tests/test_petab_import.py index fc978b76ea..7a476f272d 100644 --- a/python/tests/test_petab_import.py +++ b/python/tests/test_petab_import.py @@ -6,7 +6,6 @@ from amici.testing import TemporaryDirectoryWinSafe, skip_on_valgrind petab = pytest.importorskip("petab", reason="Missing petab") -amici_petab_import = pytest.importorskip("amici.petab_import") @pytest.fixture @@ -47,6 +46,9 @@ def test_get_fixed_parameters(simple_sbml_model): p4: not fixed (via parameter table `estimate=1`) p5: fixed (implicitly, because not listed as estimated) """ + from amici.petab.sbml_import import ( + _get_fixed_parameters_sbml as get_fixed_parameters, + ) from petab.models.sbml_model import SbmlModel sbml_doc, sbml_model = simple_sbml_model @@ -71,14 +73,14 @@ def test_get_fixed_parameters(simple_sbml_model): parameter_df=parameter_df, condition_df=condition_df, ) - assert set(amici_petab_import.get_fixed_parameters(petab_problem)) == { + assert set(get_fixed_parameters(petab_problem)) == { "p1", "p3", "p5", } assert set( - amici_petab_import.get_fixed_parameters( + get_fixed_parameters( petab_problem, non_estimated_parameters_as_constants=False ) ) == {"p1", "p5"} @@ -86,6 +88,7 @@ def test_get_fixed_parameters(simple_sbml_model): @skip_on_valgrind def test_default_output_parameters(simple_sbml_model): + from amici.petab.petab_import import import_model from petab.models.sbml_model import SbmlModel sbml_doc, sbml_model = simple_sbml_model @@ -116,7 +119,7 @@ def test_default_output_parameters(simple_sbml_model): ) with TemporaryDirectoryWinSafe() as outdir: - sbml_importer = amici_petab_import.import_model( + sbml_importer = 
import_model( petab_problem=petab_problem, output_parameter_defaults={"observableParameter1_obs1": 1.0}, compile=False, @@ -130,7 +133,7 @@ def test_default_output_parameters(simple_sbml_model): ) with pytest.raises(ValueError): - amici_petab_import.import_model( + import_model( petab_problem=petab_problem, output_parameter_defaults={"nonExistentParameter": 1.0}, compile=False, diff --git a/python/tests/test_petab_objective.py b/python/tests/test_petab_objective.py index e31e693d11..5d29ad88ff 100755 --- a/python/tests/test_petab_objective.py +++ b/python/tests/test_petab_objective.py @@ -4,13 +4,13 @@ from pathlib import Path import amici -import amici.petab_import -import amici.petab_objective import numpy as np import pandas as pd import petab import pytest -from amici.petab_objective import SLLH +from amici.petab.petab_import import import_petab_problem +from amici.petab.simulations import SLLH, simulate_petab +from amici.testing import skip_on_valgrind # Absolute and relative tolerances for finite difference gradient checks. 
ATOL: float = 1e-3 @@ -30,9 +30,10 @@ def lotka_volterra() -> petab.Problem: ) +@skip_on_valgrind def test_simulate_petab_sensitivities(lotka_volterra): petab_problem = lotka_volterra - amici_model = amici.petab_import.import_petab_problem(petab_problem) + amici_model = import_petab_problem(petab_problem) amici_solver = amici_model.getSolver() amici_solver.setSensitivityOrder(amici.SensitivityOrder_first) @@ -54,7 +55,7 @@ def test_simulate_petab_sensitivities(lotka_volterra): problem_parameters ) results[(scaled_parameters, scaled_gradients)] = pd.Series( - amici.petab_objective.simulate_petab( + simulate_petab( petab_problem=petab_problem, amici_model=amici_model, solver=amici_solver, diff --git a/python/tests/test_petab_simulate.py b/python/tests/test_petab_simulate.py index febea5fd50..a8240bff33 100644 --- a/python/tests/test_petab_simulate.py +++ b/python/tests/test_petab_simulate.py @@ -5,7 +5,7 @@ import petab import petabtests import pytest -from amici.petab_simulate import PetabSimulator +from amici.petab.simulator import PetabSimulator from amici.testing import skip_on_valgrind diff --git a/python/tests/test_preequilibration.py b/python/tests/test_preequilibration.py index a42bc6354d..be447b0c54 100644 --- a/python/tests/test_preequilibration.py +++ b/python/tests/test_preequilibration.py @@ -5,7 +5,8 @@ import amici import numpy as np import pytest -from numpy.testing import assert_allclose +from amici.debugging import get_model_for_preeq +from numpy.testing import assert_allclose, assert_equal from test_pysb import get_data @@ -633,3 +634,31 @@ def test_simulation_errors(preeq_fixture): assert rdata._swigptr.messages[2].identifier == "OTHER" assert rdata._swigptr.messages[3].severity == amici.LogSeverity_debug assert rdata._swigptr.messages[3].identifier == "BACKTRACE" + + +def test_get_model_for_preeq(preeq_fixture): + ( + model, + solver, + edata, + edata_preeq, + edata_presim, + edata_sim, + pscales, + plists, + ) = preeq_fixture + 
model.setSteadyStateSensitivityMode( + amici.SteadyStateSensitivityMode.integrationOnly + ) + model_preeq = get_model_for_preeq(model, edata) + # the exactly same settings are used, so results should match exactly + rdata1 = amici.runAmiciSimulation(model_preeq, solver) + rdata2 = amici.runAmiciSimulation(model, solver, edata_preeq) + assert_equal( + rdata1.x, + rdata2.x, + ) + assert_equal( + rdata1.sx, + rdata2.sx, + ) diff --git a/python/tests/test_pysb.py b/python/tests/test_pysb.py index 52ca3a320f..2911b05fc9 100644 --- a/python/tests/test_pysb.py +++ b/python/tests/test_pysb.py @@ -1,4 +1,5 @@ """PYSB model tests""" +# flake8: noqa: F821 import importlib import logging diff --git a/python/tests/test_rdata.py b/python/tests/test_rdata.py index ac7659f363..8e0f78655e 100644 --- a/python/tests/test_rdata.py +++ b/python/tests/test_rdata.py @@ -4,6 +4,7 @@ import pytest from amici.numpy import evaluate from numpy.testing import assert_almost_equal, assert_array_equal +from amici.testing import skip_on_valgrind @pytest.fixture(scope="session") @@ -19,6 +20,7 @@ def rdata_by_id_fixture(sbml_example_presimulation_module): return model, rdata +@skip_on_valgrind def test_rdata_by_id(rdata_by_id_fixture): model, rdata = rdata_by_id_fixture @@ -42,6 +44,7 @@ def test_rdata_by_id(rdata_by_id_fixture): ) +@skip_on_valgrind def test_evaluate(rdata_by_id_fixture): # get IDs of model components model, rdata = rdata_by_id_fixture diff --git a/python/tests/test_sbml_import.py b/python/tests/test_sbml_import.py index 7c4a67c0a2..aa343dfcc3 100644 --- a/python/tests/test_sbml_import.py +++ b/python/tests/test_sbml_import.py @@ -654,6 +654,7 @@ def _test_set_parameters_by_dict(model_module): assert model.getParameters() == old_parameter_values +@skip_on_valgrind @pytest.mark.parametrize("extract_cse", [True, False]) def test_code_gen_uses_cse(extract_cse): """Check that code generation honors AMICI_EXTRACT_CSE""" @@ -675,6 +676,7 @@ def test_code_gen_uses_cse(extract_cse): 
os.environ = old_environ +@skip_on_valgrind def test_code_gen_uses_lhs_symbol_ids(): """Check that code generation uses symbol IDs instead of plain array indices""" @@ -691,6 +693,7 @@ def test_code_gen_uses_lhs_symbol_ids(): assert "dobservable_x1_dx1 = " in dwdx +@skip_on_valgrind def test_hardcode_parameters(simple_sbml_model): """Test model generation works for model without observables""" sbml_doc, sbml_model = simple_sbml_model diff --git a/python/tests/test_swig_interface.py b/python/tests/test_swig_interface.py index a746552b55..f214519f26 100644 --- a/python/tests/test_swig_interface.py +++ b/python/tests/test_swig_interface.py @@ -6,6 +6,8 @@ import copy import numbers +import pytest + import amici import numpy as np @@ -66,10 +68,7 @@ def test_copy_constructors(pysb_example_presimulation_module): model_instance_settings0 = { # setting name: [default value, custom value] "AddSigmaResiduals": [False, True], - "AlwaysCheckFinite": [ - False, - True, - ], + "AlwaysCheckFinite": [False, True], # Skipped due to model dependency in `'InitialStates'`. "FixedParameters": None, "InitialStates": [ @@ -130,6 +129,13 @@ def test_model_instance_settings(pysb_example_presimulation_module): i_getter = 0 i_setter = 1 + # the default setting for AlwaysCheckFinite depends on whether the amici + # extension has been built in debug mode + model_instance_settings0["AlwaysCheckFinite"] = [ + model0.getAlwaysCheckFinite(), + not model0.getAlwaysCheckFinite(), + ] + # All settings are tested. 
assert set(model_instance_settings0) == set( amici.swig_wrappers.model_instance_settings @@ -315,6 +321,7 @@ def test_unhandled_settings(pysb_example_presimulation_module): "setParametersByIdRegex", "setParametersByNameRegex", "setInitialStateSensitivities", + "get_trigger_timepoints", ] from amici.swig_wrappers import model_instance_settings @@ -420,8 +427,6 @@ def test_solver_repr(): for s in (solver, solver_ptr): assert "maxsteps" in str(s) assert "maxsteps" in repr(s) - # avoid double delete!! - solver_ptr.release() def test_edata_repr(): @@ -441,8 +446,6 @@ def test_edata_repr(): for expected_str in expected_strs: assert expected_str in str(e) assert expected_str in repr(e) - # avoid double delete!! - edata_ptr.release() def test_edata_equality_operator(): @@ -470,3 +473,55 @@ def test_expdata_and_expdataview_are_deepcopyable(): ev2 = copy.deepcopy(ev1) assert ev2._swigptr.this != ev1._swigptr.this assert ev1 == ev2 + + +def test_solvers_are_deepcopyable(): + for solver_type in (amici.CVodeSolver, amici.IDASolver): + for solver1 in (solver_type(), amici.SolverPtr(solver_type())): + solver2 = copy.deepcopy(solver1) + assert solver1.this != solver2.this + assert ( + solver1.getRelativeTolerance() + == solver2.getRelativeTolerance() + ) + solver2.setRelativeTolerance(100 * solver2.getRelativeTolerance()) + assert ( + solver1.getRelativeTolerance() + != solver2.getRelativeTolerance() + ) + + +def test_model_is_deepcopyable(pysb_example_presimulation_module): + model_module = pysb_example_presimulation_module + for model1 in ( + model_module.getModel(), + amici.ModelPtr(model_module.getModel()), + ): + model2 = copy.deepcopy(model1) + assert model1.this != model2.this + assert model1.t0() == model2.t0() + model2.setT0(100 + model2.t0()) + assert model1.t0() != model2.t0() + + +def test_rdataview(sbml_example_presimulation_module): + """Test some SwigPtrView functionality via ReturnDataView.""" + model_module = sbml_example_presimulation_module + model = 
model_module.getModel() + rdata = amici.runAmiciSimulation(model, model.getSolver()) + assert isinstance(rdata, amici.ReturnDataView) + + # fields are accessible via dot notation and [] operator, + # __contains__ and __getattr__ are implemented correctly + with pytest.raises(AttributeError): + _ = rdata.nonexisting_attribute + + with pytest.raises(KeyError): + _ = rdata["nonexisting_attribute"] + + assert not hasattr(rdata, "nonexisting_attribute") + assert "x" in rdata + assert rdata.x == rdata["x"] + + # field names are included by dir() + assert "x" in dir(rdata) diff --git a/python/tests/valgrind-python.supp b/python/tests/valgrind-python.supp index 26a9f0e7d1..9cd3f5de3b 100644 --- a/python/tests/valgrind-python.supp +++ b/python/tests/valgrind-python.supp @@ -76,17 +76,28 @@ Memcheck:Leak fun:malloc ... - fun:__pyx_pw_5numpy_6random_13bit_generator_12BitGenerator_1__init__ + fun:__pyx_pw_5numpy_* } { numpy Memcheck:Leak - match-leak-kinds: definite + match-leak-kinds: possible fun:malloc - obj:/usr/bin/python3.? + fun:PyUFunc_FromFuncAndDataAndSignatureAndIdentity + fun:initumath + fun:PyInit__multiarray_umath + ... +} + +{ + numpy + Memcheck:Leak + match-leak-kinds: possible + fun:malloc + fun:default_malloc + fun:PyDataMem_UserNEW ... - fun:gentype_generic_method } # @@ -178,8 +189,8 @@ { other Memcheck:Cond - obj:/usr/bin/python3.? - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* + obj:/usr/bin/python3.* fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall @@ -187,17 +198,17 @@ fun:_PyEval_EvalCodeWithName fun:_PyFunction_Vectorcall fun:_PyEval_EvalFrameDefault - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* fun:_PyEval_EvalFrameDefault - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* } { other Memcheck:Value8 - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* fun:__Pyx_PyObject_Call fun:__Pyx__PyObject_CallOneArg } @@ -308,7 +319,7 @@ ... 
fun:PyBytes_Repr fun:PyObject_Str - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... fun:PyObject_Format ... @@ -382,9 +393,9 @@ { other Memcheck:Cond - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* fun:__Pyx_PyObject_Call fun:__Pyx__PyObject_CallOneArg ... @@ -410,9 +421,9 @@ other Memcheck:Cond ... - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* fun:_PyObject_CallMethodIdObjArgs - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... } @@ -437,9 +448,9 @@ other Memcheck:Value8 ... - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* fun:_PyObject_CallMethodIdObjArgs - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... } @@ -455,25 +466,25 @@ other Memcheck:Value8 ... - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* fun:PyDict_SetItem - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... } { other Memcheck:Cond - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* } { other Memcheck:Cond fun:realloc - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... fun:_PyFunction_Vectorcall } @@ -481,54 +492,40 @@ { other Memcheck:Value8 - obj:/usr/bin/python3.? + obj:/usr/bin/python3.* ... - obj:/usr/bin/python3.? -} - -{ - other - Memcheck:Leak - match-leak-kinds: definite - fun:malloc - obj:/usr/bin/python3.? - fun:_PyObject_MakeTpCall + obj:/usr/bin/python3.* } { other - Memcheck:Leak - match-leak-kinds: definite - fun:malloc - obj:/usr/bin/python3.? - obj:/usr/bin/python3.? + Memcheck:Value8 + obj:/usr/bin/python3.* fun:_PyObject_MakeTpCall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.11 + obj:/usr/bin/python3.11 + obj:/usr/bin/python3.11 + fun:PyIter_Next + obj:/usr/bin/python3.11 + fun:PyBytes_FromObject + obj:/usr/bin/python3.11 + obj:/usr/bin/python3.11 + fun:PyObject_Vectorcall } { other Memcheck:Leak - match-leak-kinds: definite - fun:malloc - obj:/usr/bin/python3.? - ... - fun:PyTuple_New + fun:realloc ... 
+ fun:_dl_catch_exception } { other Memcheck:Leak - match-leak-kinds: definite fun:malloc - obj:/usr/bin/python3.? - fun:PyList_AsTuple -} - -{ - other - Memcheck:Leak - fun:realloc ... fun:_dl_catch_exception } @@ -536,7 +533,7 @@ { other Memcheck:Leak - fun:malloc + fun:calloc ... fun:_dl_catch_exception } @@ -544,7 +541,6 @@ { Pandas Memcheck:Leak - match-leak-kinds: definite fun:malloc ... obj:*site-packages/pandas/_libs/*.cpython-3*-x86_64-linux-gnu.so @@ -561,30 +557,6 @@ ... } - -{ - PyTuple_Pack - Memcheck:Leak - match-leak-kinds: definite - fun:malloc - obj:/usr/bin/python3.* - fun:PyTuple_Pack - obj:/usr/bin/python3.* - ... -} - -{ - PyAST_CompileObject - Memcheck:Leak - match-leak-kinds: definite - fun:malloc - obj:/usr/bin/python3.* - ... - fun:PyAST_CompileObject - obj:/usr/bin/python3.* - ... -} - { other Memcheck:Value8 @@ -775,3 +747,103 @@ fun:os_stat ... } + +{ + Python PyLong_FromUnicodeObject + Memcheck:Cond + fun:PyLong_FromString + fun:PyLong_FromUnicodeObject +} + +{ + Python PyLong_FromUnicodeObject + Memcheck:Value8 + fun:PyLong_FromString + fun:PyLong_FromUnicodeObject +} + +{ + Python + Memcheck:Leak + match-leak-kinds: possible + fun:realloc + obj:/usr/bin/python3.* + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall +} + +{ + Python + Memcheck:Leak + match-leak-kinds: possible + fun:calloc + fun:PyList_New + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + ... +} + +{ + Python + Memcheck:Leak + match-leak-kinds: possible + fun:malloc + fun:PyModule_ExecDef + obj:/usr/bin/python3.* + ... +} + +{ + Python + Memcheck:Leak + match-leak-kinds: possible + fun:malloc + obj:/usr/bin/python3.* + ... +} + +{ + Python + Memcheck:Addr32 + fun:__wcsncpy_avx2 + fun:_Py_wgetcwd + obj:/usr/bin/python3.* + fun:Py_RunMain + fun:Py_BytesMain + fun:(below main) +} + +{ + Python + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.* + ... 
+} + +{ + Python + Memcheck:Leak + match-leak-kinds: definite + fun:*alloc + fun:_PyObject_GC_* + obj:/usr/bin/python3.* +} + + +{ + Antimony with libsbml 5.20.1 + Memcheck:Leak + match-leak-kinds: definite + fun:_Znwm + fun:_ZN7libsbml12SBMLDocument14getAllElementsEPNS_13ElementFilterE + fun:_ZN7libsbml23CompFlatteningConverter21unsetExplicitlyListedEv + fun:_ZN7libsbml23CompFlatteningConverter17performConversionEv + fun:_ZN7libsbml23CompFlatteningConverter7convertEv + fun:_ZN7libsbml22CompSBMLDocumentPlugin16checkConsistencyEv + fun:_ZN7libsbml12SBMLDocument16checkConsistencyEv + ... + fun:loadAntimonyString + ... +} diff --git a/scripts/buildAmici.sh b/scripts/buildAmici.sh index a8ddc7c27d..507a391621 100755 --- a/scripts/buildAmici.sh +++ b/scripts/buildAmici.sh @@ -25,7 +25,7 @@ else fi # required for build swig interface -python3 -m pip install numpy +pip show numpy > /dev/null || python3 -m pip install numpy ${cmake} \ -Wdev -DAMICI_CXX_OPTIONS="-Wall;-Wextra${extra_cxx_flags}" \ diff --git a/scripts/buildSuiteSparse.sh b/scripts/buildSuiteSparse.sh index 36b493b62d..e916530de6 100755 --- a/scripts/buildSuiteSparse.sh +++ b/scripts/buildSuiteSparse.sh @@ -9,6 +9,6 @@ amici_path=$(cd "$script_path/.." && pwd) suitesparse_root="${amici_path}/ThirdParty/SuiteSparse" export CMAKE_OPTIONS="-DBLA_VENDOR=All -DENABLE_CUDA=FALSE -DNFORTRAN=TRUE -DNCHOLMOD=TRUE" -for subdir in SuiteSparse_config BTF AMD CAMD COLAMD KLU +for subdir in SuiteSparse_config BTF AMD COLAMD KLU do cd "${suitesparse_root}/${subdir}" && make local install done diff --git a/scripts/downloadAndBuildDoxygen.sh b/scripts/downloadAndBuildDoxygen.sh index 19d86be5a1..4efa9ab483 100755 --- a/scripts/downloadAndBuildDoxygen.sh +++ b/scripts/downloadAndBuildDoxygen.sh @@ -9,7 +9,7 @@ DOXYGEN_DIR="${AMICI_PATH}"/ThirdParty/doxygen cd "${AMICI_PATH}"/ThirdParty if [[ ! 
-d ${DOXYGEN_DIR} ]]; then git clone --single-branch \ - --branch Release_1_9_7 \ + --branch Release_1_10_0 \ --depth 1 \ -c advice.detachedHead=false \ https://github.com/doxygen/doxygen.git "${DOXYGEN_DIR}" diff --git a/scripts/installAmiciSource.sh b/scripts/installAmiciSource.sh index 4e693468b7..bbb4bf4a83 100755 --- a/scripts/installAmiciSource.sh +++ b/scripts/installAmiciSource.sh @@ -1,35 +1,37 @@ #!/bin/bash -# -# Build libamici -# +# Create a virtual environment and perform an editable amici installation set -e SCRIPT_PATH=$(dirname $BASH_SOURCE) -AMICI_PATH=$(cd $SCRIPT_PATH/.. && pwd) +AMICI_PATH=$(cd "$SCRIPT_PATH/.." && pwd) +venv_dir="${AMICI_PATH}/build/venv" # Disabled until cmake package is made compatible with updated setup.py #make python-wheel #pip3 install --user --prefix= `ls -t ${AMICI_PATH}/build/python/amici-*.whl | head -1` # test install from setup.py set +e -python3 -m venv ${AMICI_PATH}/build/venv --clear +mkdir -p "${venv_dir}" +python3 -m venv "${venv_dir}" --clear # in case this fails (usually due to missing ensurepip, try getting pip # manually if [[ $? 
]]; then set -e - python3 -m venv ${AMICI_PATH}/build/venv --clear --without-pip - source ${AMICI_PATH}/build/venv/bin/activate - curl https://bootstrap.pypa.io/get-pip.py -o ${AMICI_PATH}/build/get-pip.py - python3 ${AMICI_PATH}/build/get-pip.py + python3 -m venv "${venv_dir}" --clear --without-pip + source "${venv_dir}/bin/activate" + get_pip=${AMICI_PATH}/build/get-pip.py + curl "https://bootstrap.pypa.io/get-pip.py" -o "${get_pip}" + python3 "${get_pip}" + rm "${get_pip}" else set -e - source ${AMICI_PATH}/build/venv/bin/activate + source "${venv_dir}/bin/activate" fi -pip install --upgrade pip wheel -pip install --upgrade pip scipy matplotlib coverage pytest \ - pytest-cov cmake_build_extension numpy -pip install git+https://github.com/FFroehlich/pysb@fix_pattern_matching # pin to PR for SPM with compartments -AMICI_BUILD_TEMP="${AMICI_PATH}/python/sdist/build/temp" pip install --verbose -e ${AMICI_PATH}/python/sdist[petab,test,vis] --no-build-isolation +python -m pip install --upgrade pip wheel +python -m pip install --upgrade pip setuptools cmake_build_extension numpy +python -m pip install git+https://github.com/FFroehlich/pysb@fix_pattern_matching # pin to PR for SPM with compartments +AMICI_BUILD_TEMP="${AMICI_PATH}/python/sdist/build/temp" \ + python -m pip install --verbose -e "${AMICI_PATH}/python/sdist[petab,test,vis]" --no-build-isolation deactivate diff --git a/scripts/run-python-tests.sh b/scripts/run-python-tests.sh index 982aa02f0f..d58a1c8dec 100755 --- a/scripts/run-python-tests.sh +++ b/scripts/run-python-tests.sh @@ -1,7 +1,8 @@ #!/bin/bash -# Test python model wrapping inside virtual environment +# Run Python test suite inside virtual environment +# Usage: ./run-python-tests.sh [additional pytest arguments] -script_path=$(dirname $BASH_SOURCE) +script_path=$(dirname "${BASH_SOURCE[0]}") amici_path=$(cd "$script_path"/.. 
&& pwd) set -e @@ -12,7 +13,10 @@ fi cd "${amici_path}"/python/tests source "${amici_path}"/build/venv/bin/activate -pip install scipy h5py pytest pytest-cov # PEtab tests are run separately -pytest --ignore-glob=*petab* --ignore-glob=*test_splines.py +pytest \ + --ignore-glob=*petab* \ + --ignore-glob=*test_splines.py \ + --durations=10 \ + $@ diff --git a/scripts/run-sphinx-hasenv.sh b/scripts/run-sphinx-hasenv.sh deleted file mode 100755 index e401737022..0000000000 --- a/scripts/run-sphinx-hasenv.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Build the sphinx documentation in an environment prepared -# as in run-sphinx.sh already - -SCRIPT_PATH=$(dirname $BASH_SOURCE) -AMICI_PATH=$(cd $SCRIPT_PATH/.. && pwd) - -source ${AMICI_PATH}/doc-venv/bin/activate - -cd ${AMICI_PATH}/documentation - -rm -rf ${AMICI_PATH}/documentation/generated - -sphinx-build -T -E -W --keep-going -b readthedocs -d _build/doctrees-readthedocs -D language=en . _build/html - -ret=$? -if [[ $ret != 0 ]]; then exit $ret; fi diff --git a/scripts/run-sphinx.sh b/scripts/run-sphinx.sh deleted file mode 100755 index e7b1ce0861..0000000000 --- a/scripts/run-sphinx.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# generate code documentation via sphinx for upload to rtd - -SCRIPT_PATH=$(dirname $BASH_SOURCE) -AMICI_PATH=$(cd $SCRIPT_PATH/.. && pwd) - -python3 -m venv ${AMICI_PATH}/doc-venv --clear -source ${AMICI_PATH}/doc-venv/bin/activate -python -m pip install --upgrade --no-cache-dir pip setuptools wheel -(cd ${AMICI_PATH}/ && python -m pip install --exists-action=w --no-cache-dir -r documentation/rtd_requirements.txt) -(cd ${AMICI_PATH}/ && python -m pip install --exists-action=w --no-cache-dir -r documentation/rtd_requirements2.txt) - -${AMICI_PATH}/scripts/run-sphinx-hasenv.sh - -ret=$? 
-if [[ $ret != 0 ]]; then exit $ret; fi diff --git a/scripts/run-valgrind-py.sh b/scripts/run-valgrind-py.sh index 510e27868a..c2a6239ad4 100755 --- a/scripts/run-valgrind-py.sh +++ b/scripts/run-valgrind-py.sh @@ -1,7 +1,8 @@ #!/bin/bash -# Test python model wrapping inside virtual environment +# Without arguments: run Python test suite under valgrind +# With arguments: run whatever was passed as arguments under valgrind -script_path=$(dirname $BASH_SOURCE) +script_path=$(dirname "${BASH_SOURCE[0]}") amici_path=$(cd "$script_path"/.. && pwd) set -e @@ -9,23 +10,30 @@ set -e if [[ -z "${BNGPATH}" ]]; then export BNGPATH=${amici_path}/ThirdParty/BioNetGen-2.7.0 fi +suppressions="${amici_path}/python/tests/valgrind-python.supp" +if [ $# -eq 0 ] + then + # No arguments supplied, run all tests + cd "${amici_path}"/python/tests + source "${amici_path}"/build/venv/bin/activate + command=(python -m pytest -vv --ignore-glob=*petab* -W 'ignore:Signature ') + # ^ ignores the following warning that occurs only under valgrind, + # e.g. `valgrind python -c "import h5py"`: + # UserWarning: Signature b'\x00\xd0\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf\x00\x00\x00\x00\x00\x00' + # for does not match any known type: falling back to type probe function. +else + # Run whatever was passed as arguments + command=($@) +fi -cd "${amici_path}"/python/tests - -source "${amici_path}"/build/venv/bin/activate - -pip install scipy h5py pytest pytest-rerunfailures +set -x PYTHONMALLOC=malloc valgrind \ - --suppressions=valgrind-python.supp \ + --suppressions="${suppressions}" \ --show-leak-kinds=definite \ --errors-for-leak-kinds=definite \ --error-exitcode=1 \ --leak-check=full \ --gen-suppressions=all \ -v \ - python -m pytest -vv --ignore-glob=*petab* -W "ignore:Signature " -# ^ ignores the following warning that occurs only under valgrind, -# e.g. 
`valgrind python -c "import h5py"`: -# UserWarning: Signature b'\x00\xd0\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf\x00\x00\x00\x00\x00\x00' -# for does not match any known type: falling back to type probe function. + "${command[@]}" diff --git a/src/CMakeLists.template.cmake b/src/CMakeLists.template.cmake index 43df61ff61..572efede80 100644 --- a/src/CMakeLists.template.cmake +++ b/src/CMakeLists.template.cmake @@ -20,7 +20,7 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") endif() foreach(flag ${MY_CXX_FLAGS}) unset(CUR_FLAG_SUPPORTED CACHE) - check_cxx_compiler_flag(-Werror ${flag} CUR_FLAG_SUPPORTED) + check_cxx_compiler_flag(${flag} CUR_FLAG_SUPPORTED) if(${CUR_FLAG_SUPPORTED}) string(APPEND CMAKE_CXX_FLAGS " ${flag}") endif() @@ -39,6 +39,18 @@ find_package(Amici TPL_AMICI_VERSION REQUIRED HINTS ${CMAKE_CURRENT_LIST_DIR}/../../build) message(STATUS "Found AMICI ${Amici_DIR}") +# Debug build? +if("$ENV{ENABLE_AMICI_DEBUGGING}" OR "$ENV{ENABLE_GCOV_COVERAGE}") + add_compile_options(-UNDEBUG -O0 -g) + set(CMAKE_BUILD_TYPE "Debug") +endif() + +# coverage options +if($ENV{ENABLE_GCOV_COVERAGE}) + string(APPEND CMAKE_CXX_FLAGS_DEBUG " --coverage") + string(APPEND CMAKE_EXE_LINKER_FLAGS_DEBUG " --coverage") +endif() + set(MODEL_DIR ${CMAKE_CURRENT_LIST_DIR}) set(SRC_LIST_LIB TPL_SOURCES ${MODEL_DIR}/wrapfunctions.cpp) @@ -66,18 +78,6 @@ if(NOT "${AMICI_PYTHON_BUILD_EXT_ONLY}") target_link_libraries(simulate_${PROJECT_NAME} ${PROJECT_NAME}) endif() -# Debug build? -if("$ENV{ENABLE_AMICI_DEBUGGING}" OR "$ENV{ENABLE_GCOV_COVERAGE}") - add_compile_options(-UNDEBUG -O0 -g) - set(CMAKE_BUILD_TYPE "Debug") -endif() - -# coverage options -if($ENV{ENABLE_GCOV_COVERAGE}) - string(APPEND CMAKE_CXX_FLAGS_DEBUG " --coverage") - string(APPEND CMAKE_EXE_LINKER_FLAGS_DEBUG " --coverage") -endif() - # SWIG option(ENABLE_SWIG "Build swig/python library?" 
ON) if(ENABLE_SWIG) diff --git a/src/amici.cpp b/src/amici.cpp index ee3949b0bf..6e85dde857 100644 --- a/src/amici.cpp +++ b/src/amici.cpp @@ -13,11 +13,6 @@ #include //return codes #include //realtype -#include -#include -#include -#include -#include #include #include #include diff --git a/src/backwardproblem.cpp b/src/backwardproblem.cpp index d1b89967db..c3a859ca85 100644 --- a/src/backwardproblem.cpp +++ b/src/backwardproblem.cpp @@ -8,9 +8,6 @@ #include "amici/solver.h" #include "amici/steadystateproblem.h" -#include -#include - namespace amici { BackwardProblem::BackwardProblem( diff --git a/src/edata.cpp b/src/edata.cpp index ddc3b65992..05499bb9cf 100644 --- a/src/edata.cpp +++ b/src/edata.cpp @@ -5,7 +5,6 @@ #include "amici/symbolic_functions.h" // getNaN #include -#include #include #include @@ -339,6 +338,18 @@ realtype const* ExpData::getObservedEventsStdDevPtr(int ie) const { return nullptr; } +void ExpData::clear_observations() { + std::fill(observed_data_.begin(), observed_data_.end(), getNaN()); + std::fill( + observed_data_std_dev_.begin(), observed_data_std_dev_.end(), getNaN() + ); + std::fill(observed_events_.begin(), observed_events_.end(), getNaN()); + std::fill( + observed_events_std_dev_.begin(), observed_events_std_dev_.end(), + getNaN() + ); +} + void ExpData::applyDimensions() { applyDataDimension(); applyEventDimension(); diff --git a/src/exception.cpp b/src/exception.cpp index 3a5811e4f5..81ec938d29 100644 --- a/src/exception.cpp +++ b/src/exception.cpp @@ -3,7 +3,6 @@ #include #include -#include namespace amici { @@ -34,14 +33,20 @@ void AmiException::storeMessage(char const* fmt, va_list argptr) { vsnprintf(msg_.data(), msg_.size(), fmt, argptr); } -CvodeException::CvodeException(int const error_code, char const* function) +CvodeException::CvodeException( + int const error_code, char const* function, char const* extra +) : AmiException( - "Cvode routine %s failed with error code %i", function, error_code + "CVODE routine %s failed with 
error code %i. %s", function, error_code, + extra ? extra : "" ) {} -IDAException::IDAException(int const error_code, char const* function) +IDAException::IDAException( + int const error_code, char const* function, char const* extra +) : AmiException( - "IDA routine %s failed with error code %i", function, error_code + "IDA routine %s failed with error code %i. %s", function, error_code, + extra ? extra : "" ) {} IntegrationFailure::IntegrationFailure(int code, realtype t) diff --git a/src/forwardproblem.cpp b/src/forwardproblem.cpp index c70a9074b1..c090f51280 100644 --- a/src/forwardproblem.cpp +++ b/src/forwardproblem.cpp @@ -9,10 +9,30 @@ #include #include -#include namespace amici { +/** + * @brief Check if the next timepoint is too close to the current timepoint. + * + * Based on CVODES' `cvHin`. + * @param cur_t Current time. + * @param t_next Next stop time. + * @return True if too close, false otherwise. + */ +bool is_next_t_too_close(realtype cur_t, realtype t_next) { + auto tdiff = t_next - cur_t; + if(tdiff == 0.0) + return true; + + auto tdist = std::fabs(tdiff); + auto tround = std::numeric_limits::epsilon() * std::max(std::fabs(cur_t), std::fabs(t_next)); + if (tdist < 2.0 * tround) + return true; + + return false; +} + ForwardProblem::ForwardProblem( ExpData const* edata, Model* model, Solver* solver, SteadystateProblem const* preeq @@ -110,30 +130,69 @@ void ForwardProblem::workForwardProblem() { /* store initial state and sensitivity*/ initial_state_ = getSimulationState(); + // store root information at t0 + model->froot(t_, x_, dx_, rootvals_); + + // get list of trigger timepoints for fixed-time triggered events + auto trigger_timepoints = model->get_trigger_timepoints(); + auto it_trigger_timepoints = std::find_if( + trigger_timepoints.begin(), trigger_timepoints.end(), + [this](auto t) { return t > this->t_; } + ); /* loop over timepoints */ for (it_ = 0; it_ < model->nt(); it_++) { - auto nextTimepoint = model->getTimepoint(it_); + // next 
output time-point + auto next_t_out = model->getTimepoint(it_); - if (std::isinf(nextTimepoint)) + if (std::isinf(next_t_out)) break; - if (nextTimepoint > model->t0()) { - // Solve for nextTimepoint - while (t_ < nextTimepoint) { - int status = solver->run(nextTimepoint); - solver->writeSolution(&t_, x_, dx_, sx_, dx_); + if (next_t_out > model->t0()) { + // Solve for next output timepoint + while (t_ < next_t_out) { + if (is_next_t_too_close(t_, next_t_out)) { + // next timepoint is too close to current timepoint. + // we use the state of the current timepoint. + break; + } + + // next stop time is next output timepoint or next + // time-triggered event + auto next_t_event + = it_trigger_timepoints != trigger_timepoints.end() + ? *it_trigger_timepoints + : std::numeric_limits::infinity(); + auto next_t_stop = std::min(next_t_out, next_t_event); + + int status = solver->run(next_t_stop); /* sx will be copied from solver on demand if sensitivities are computed */ + solver->writeSolution(&t_, x_, dx_, sx_, dx_); + if (status == AMICI_ILL_INPUT) { - /* clustering of roots => turn off rootfinding */ + /* clustering of roots => turn off root-finding */ solver->turnOffRootFinding(); - } else if (status == AMICI_ROOT_RETURN) { + } else if (status == AMICI_ROOT_RETURN || t_ == next_t_event) { + // solver-tracked or time-triggered event + solver->getRootInfo(roots_found_.data()); + + // check if we are at a trigger timepoint. 
+ // if so, set the root-found flag + if (t_ == next_t_event) { + for (auto ie : model->state_independent_events_[t_]) { + // determine direction of root crossing from + // root function value at the previous event + roots_found_[ie] = std::copysign(1, -rootvals_[ie]); + } + ++it_trigger_timepoints; + } + handleEvent(&tlastroot_, false, false); } } } - handleDataPoint(it_); + handleDataPoint(next_t_out); } /* fill events */ @@ -157,13 +216,9 @@ void ForwardProblem::handleEvent( /* store Heaviside information at event occurrence */ model->froot(t_, x_, dx_, rootvals_); - /* store timepoint at which the event occurred*/ + /* store timepoint at which the event occurred */ discs_.push_back(t_); - /* extract and store which events occurred */ - if (!seflag && !initial_event) { - solver->getRootInfo(roots_found_.data()); - } root_idx_.push_back(roots_found_); rval_tmp_ = rootvals_; @@ -183,6 +238,77 @@ void ForwardProblem::handleEvent( if (model->nz > 0) storeEvent(); + store_pre_event_state(seflag, initial_event); + + if (!initial_event) + model->updateHeaviside(roots_found_); + + applyEventBolus(); + + if (solver->computingFSA()) { + /* compute the new xdot */ + model->fxdot(t_, x_, dx_, xdot_); + applyEventSensiBolusFSA(); + } + + handle_secondary_event(tlastroot); + + /* only reinitialise in the first event fired */ + if (!seflag) { + solver->reInit(t_, x_, dx_); + if (solver->computingFSA()) { + solver->sensReInit(sx_, sdx_); + } + } +} + +void ForwardProblem::storeEvent() { + if (t_ == model->getTimepoint(model->nt() - 1)) { + // call from fillEvent at last timepoint + model->froot(t_, x_, dx_, rootvals_); + for (int ie = 0; ie < model->ne; ie++) { + roots_found_.at(ie) = (nroots_.at(ie) < model->nMaxEvent()) ? 
1 : 0; + } + root_idx_.push_back(roots_found_); + } + + if (getRootCounter() < getEventCounter()) { + /* update stored state (sensi) */ + event_states_.at(getRootCounter()) = getSimulationState(); + } else { + /* add stored state (sensi) */ + event_states_.push_back(getSimulationState()); + } + + /* EVENT OUTPUT */ + for (int ie = 0; ie < model->ne; ie++) { + /* only look for roots of the rootfunction not discontinuities */ + if (nroots_.at(ie) >= model->nMaxEvent()) + continue; + + /* only consider transitions false -> true or event filling */ + if (roots_found_.at(ie) != 1 + && t_ != model->getTimepoint(model->nt() - 1)) { + continue; + } + + if (edata && solver->computingASA()) + model->getAdjointStateEventUpdate( + slice(dJzdx_, nroots_.at(ie), model->nx_solver * model->nJ), ie, + nroots_.at(ie), t_, x_, *edata + ); + + nroots_.at(ie)++; + } + + if (t_ == model->getTimepoint(model->nt() - 1)) { + // call from fillEvent at last timepoint + // loop until all events are filled + fillEvents(model->nMaxEvent()); + } +} + +void ForwardProblem::store_pre_event_state(bool seflag, bool initial_event) { /* if we need to do forward sensitivities later on we need to store the old * x and the old xdot */ if (solver->getSensitivityOrder() >= SensitivityOrder::first) { @@ -213,18 +339,9 @@ void ForwardProblem::handleEvent( xdot_disc_.push_back(xdot_); xdot_old_disc_.push_back(xdot_old_); } +} - if (!initial_event) - model->updateHeaviside(roots_found_); - - applyEventBolus(); - - if (solver->computingFSA()) { - /* compute the new xdot */ - model->fxdot(t_, x_, dx_, xdot_); - applyEventSensiBolusFSA(); - } - +void ForwardProblem::handle_secondary_event(realtype* tlastroot) { int secondevent = 0; /* check whether we need to fire a secondary event */ @@ -261,67 +378,13 @@ void ForwardProblem::handleEvent( ); handleEvent(tlastroot, true, false); } - - /* only reinitialise in the first event fired */ - if (!seflag) { - solver->reInit(t_, x_, dx_); - if (solver->computingFSA()) { - 
solver->sensReInit(sx_, sdx_); - } - } -} - -void ForwardProblem::storeEvent() { - if (t_ == model->getTimepoint(model->nt() - 1)) { - // call from fillEvent at last timepoint - model->froot(t_, x_, dx_, rootvals_); - for (int ie = 0; ie < model->ne; ie++) { - roots_found_.at(ie) = (nroots_.at(ie) < model->nMaxEvent()) ? 1 : 0; - } - root_idx_.push_back(roots_found_); - } - - if (getRootCounter() < getEventCounter()) { - /* update stored state (sensi) */ - event_states_.at(getRootCounter()) = getSimulationState(); - } else { - /* add stored state (sensi) */ - event_states_.push_back(getSimulationState()); - } - - /* EVENT OUTPUT */ - for (int ie = 0; ie < model->ne; ie++) { - /* only look for roots of the rootfunction not discontinuities */ - if (nroots_.at(ie) >= model->nMaxEvent()) - continue; - - /* only consider transitions false -> true or event filling */ - if (roots_found_.at(ie) != 1 - && t_ != model->getTimepoint(model->nt() - 1)) { - continue; - } - - if (edata && solver->computingASA()) - model->getAdjointStateEventUpdate( - slice(dJzdx_, nroots_.at(ie), model->nx_solver * model->nJ), ie, - nroots_.at(ie), t_, x_, *edata - ); - - nroots_.at(ie)++; - } - - if (t_ == model->getTimepoint(model->nt() - 1)) { - // call from fillEvent at last timepoint - // loop until all events are filled - fillEvents(model->nMaxEvent()); - } } -void ForwardProblem::handleDataPoint(int /*it*/) { +void ForwardProblem::handleDataPoint(realtype t) { /* We only store the simulation state if it's not the initial state, as the initial state is stored anyway and we want to avoid storing it twice */ - if (t_ != model->t0() && timepoint_states_.count(t_) == 0) - timepoint_states_[t_] = getSimulationState(); + if (t != model->t0() && timepoint_states_.count(t) == 0) + timepoint_states_[t] = getSimulationState(); /* store diagnosis information for debugging */ solver->storeDiagnosis(); } diff --git a/src/hdf5.cpp b/src/hdf5.cpp index e0cdde88cd..d9d875cdf7 100644 --- a/src/hdf5.cpp +++ 
b/src/hdf5.cpp @@ -16,7 +16,6 @@ #include - namespace amici { namespace hdf5 { @@ -1161,8 +1160,8 @@ void readModelDataFromHDF5( model.setSteadyStateComputationMode( static_cast(getIntScalarAttribute( file, datasetPath, "steadyStateComputationMode" - )) - ); + )) + ); } if (attributeExists(file, datasetPath, "steadyStateSensitivityMode")) { diff --git a/src/model.cpp b/src/model.cpp index 017b9cf871..3c78802731 100644 --- a/src/model.cpp +++ b/src/model.cpp @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include #include @@ -178,12 +178,14 @@ Model::Model( SimulationParameters simulation_parameters, SecondOrderMode o2mode, std::vector idlist, std::vector z2event, bool const pythonGenerated, int const ndxdotdp_explicit, - int const ndxdotdx_explicit, int const w_recursion_depth + int const ndxdotdx_explicit, int const w_recursion_depth, + std::map> state_independent_events ) : ModelDimensions(model_dimensions) , pythonGenerated(pythonGenerated) , o2mode(o2mode) , idlist(std::move(idlist)) + , state_independent_events_(std::move(state_independent_events)) , derived_state_(model_dimensions) , z2event_(std::move(z2event)) , state_is_non_negative_(nx_solver, false) @@ -297,6 +299,7 @@ bool operator==(ModelDimensions const& a, ModelDimensions const& b) { && (a.nx_solver_reinit == b.nx_solver_reinit) && (a.np == b.np) && (a.nk == b.nk) && (a.ny == b.ny) && (a.nytrue == b.nytrue) && (a.nz == b.nz) && (a.nztrue == b.nztrue) && (a.ne == b.ne) + && (a.ne_solver == b.ne_solver) && (a.nspl == b.nspl) && (a.nw == b.nw) && (a.ndwdx == b.ndwdx) && (a.ndwdp == b.ndwdp) && (a.ndwdw == b.ndwdw) && (a.ndxdotdw == b.ndxdotdw) && (a.ndJydy == b.ndJydy) && (a.nnz == b.nnz) && (a.nJ == b.nJ) @@ -1815,7 +1818,7 @@ int Model::checkFinite(SUNMatrix m, ModelQuantity model_quantity, realtype t) if (hasExpressionIds()) row_id += " " + getExpressionIds()[row]; if (hasParameterIds()) - col_id += " " + getParameterIds()[plist(gsl::narrow(col))]; + col_id += " " + 
getParameterIds()[col]; break; default: break; @@ -3071,6 +3074,20 @@ void Model::fstotal_cl( ); } +std::vector Model::get_trigger_timepoints() const { + std::vector trigger_timepoints( + state_independent_events_.size(), 0.0 + ); + // collect keys from state_independent_events_ which are the trigger + // timepoints + auto it = trigger_timepoints.begin(); + for (auto const& kv : state_independent_events_) { + *(it++) = kv.first; + } + std::sort(trigger_timepoints.begin(), trigger_timepoints.end()); + return trigger_timepoints; +} + const_N_Vector Model::computeX_pos(const_N_Vector x) { if (any_state_non_negative_) { for (int ix = 0; ix < derived_state_.x_pos_tmp_.getLength(); ++ix) { diff --git a/src/model_header.template.h b/src/model_header.template.h index af05c8ccc5..932fdeb1a0 100644 --- a/src/model_header.template.h +++ b/src/model_header.template.h @@ -121,6 +121,7 @@ class Model_TPL_MODELNAME : public amici::Model_TPL_MODEL_TYPE_UPPER { TPL_NZ, // nz TPL_NZTRUE, // nztrue TPL_NEVENT, // nevent + TPL_NEVENT_SOLVER, // nevent_solver TPL_NSPL, // nspl TPL_NOBJECTIVE, // nobjective TPL_NW, // nw @@ -146,7 +147,8 @@ class Model_TPL_MODELNAME : public amici::Model_TPL_MODEL_TYPE_UPPER { true, // pythonGenerated TPL_NDXDOTDP_EXPLICIT, // ndxdotdp_explicit TPL_NDXDOTDX_EXPLICIT, // ndxdotdx_explicit - TPL_W_RECURSION_DEPTH // w_recursion_depth + TPL_W_RECURSION_DEPTH, // w_recursion_depth + {TPL_STATE_INDEPENDENT_EVENTS} // state-independent events ) { root_initial_values_ = std::vector( rootInitialValues.begin(), rootInitialValues.end() diff --git a/src/newton_solver.cpp b/src/newton_solver.cpp index b8cbe8f34d..8c3fcca5f4 100644 --- a/src/newton_solver.cpp +++ b/src/newton_solver.cpp @@ -8,10 +8,6 @@ #include // dense solver #include // sparse solver -#include -#include -#include - namespace amici { NewtonSolver::NewtonSolver(Model const& model) diff --git a/src/rdata.cpp b/src/rdata.cpp index 56fc0023c0..e96f295d23 100644 --- a/src/rdata.cpp +++ b/src/rdata.cpp 
@@ -10,7 +10,6 @@ #include "amici/symbolic_functions.h" #include -#include namespace amici { diff --git a/src/solver.cpp b/src/solver.cpp index 56bed2a1a3..22e1723640 100644 --- a/src/solver.cpp +++ b/src/solver.cpp @@ -5,8 +5,6 @@ #include "amici/symbolic_functions.h" #include -#include -#include #include namespace amici { @@ -161,7 +159,7 @@ void Solver::setup( /* activates stability limit detection */ setStabLimDet(stldet_); - rootInit(model->ne); + rootInit(model->ne_solver); if (nx() == 0) return; diff --git a/src/solver_cvodes.cpp b/src/solver_cvodes.cpp index 7157302c9e..b53be68c2e 100644 --- a/src/solver_cvodes.cpp +++ b/src/solver_cvodes.cpp @@ -13,6 +13,8 @@ #include #include +#include + #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define FOUR RCONST(4.0) @@ -490,13 +492,16 @@ void CVodeSolver::reInitPostProcess( if (status == CV_ROOT_RETURN) throw CvodeException( status, - "CVode returned a root after " - "reinitialization. The initial step-size after the event or " - "heaviside function is too small. To fix this, increase absolute " + "CVode returned a root after reinitialization. " + "The initial step-size after the event or " + "Heaviside function is too small. To fix this, increase absolute " "and relative tolerances!" 
); - if (status != CV_SUCCESS) - throw CvodeException(status, "reInitPostProcess"); + if (status != CV_SUCCESS) { + std::stringstream msg; + msg<<"tout: "<cv_nst = nst_tmp + 1; if (cv_mem->cv_adjMallocDone == SUNTRUE) { @@ -515,7 +520,7 @@ void CVodeSolver::reInitPostProcess( dt_mem[cv_mem->cv_nst % ca_mem->ca_nsteps]->t = *t; ca_mem->ca_IMstore(cv_mem, dt_mem[cv_mem->cv_nst % ca_mem->ca_nsteps]); - /* Set t1 field of the current ckeck point structure + /* Set t1 field of the current check point structure for the case in which there will be no future check points */ ca_mem->ck_mem->ck_t1 = *t; @@ -1066,9 +1071,17 @@ static int froot(realtype t, N_Vector x, realtype* root, void* user_data) { auto model = dynamic_cast(typed_udata->first); Expects(model); - model->froot(t, x, gsl::make_span(root, model->ne)); + if (model->ne != model->ne_solver) { + // temporary buffer to store all root function values, not only the ones + // tracked by the solver + static std::vector root_buffer(model->ne, 0.0); + model->froot(t, x, root_buffer); + std::copy_n(root_buffer.begin(), model->ne_solver, root); + } else { + model->froot(t, x, gsl::make_span(root, model->ne_solver)); + } return model->checkFinite( - gsl::make_span(root, model->ne), ModelQuantity::root + gsl::make_span(root, model->ne_solver), ModelQuantity::root ); } diff --git a/src/splinefunctions.cpp b/src/splinefunctions.cpp index ba9865a729..8c888b450a 100644 --- a/src/splinefunctions.cpp +++ b/src/splinefunctions.cpp @@ -2,9 +2,9 @@ #include "amici/amici.h" #include "amici/defines.h" #include "amici/exception.h" -#include "amici/vector.h" #include // std::min +#include #include #include diff --git a/src/steadystateproblem.cpp b/src/steadystateproblem.cpp index c655b9b386..98c36589f7 100644 --- a/src/steadystateproblem.cpp +++ b/src/steadystateproblem.cpp @@ -8,7 +8,6 @@ #include "amici/solver.h" #include -#include #include #include #include @@ -602,8 +601,6 @@ void SteadystateProblem::applyNewtonsMethod(Model& model, 
bool newton_retry) { int& i_newtonstep = numsteps_.at(newton_retry ? 2 : 0); i_newtonstep = 0; gamma_ = 1.0; - bool update_direction = true; - bool step_successful = false; if (model.nx_solver == 0) return; @@ -614,6 +611,8 @@ void SteadystateProblem::applyNewtonsMethod(Model& model, bool newton_retry) { bool converged = false; wrms_ = getWrms(model, SensitivityMethod::none); converged = newton_retry ? false : wrms_ < conv_thresh; + bool update_direction = true; + while (!converged && i_newtonstep < max_steps_) { /* If Newton steps are necessary, compute the initial search @@ -635,7 +634,7 @@ void SteadystateProblem::applyNewtonsMethod(Model& model, bool newton_retry) { /* Compute new xdot and residuals */ realtype wrms_tmp = getWrms(model, SensitivityMethod::none); - step_successful = wrms_tmp < wrms_; + bool step_successful = wrms_tmp < wrms_; if (step_successful) { /* If new residuals are smaller than old ones, update state */ wrms_ = wrms_tmp; diff --git a/src/sundials_linsol_wrapper.cpp b/src/sundials_linsol_wrapper.cpp index de5d4f1d61..765f2a1f91 100644 --- a/src/sundials_linsol_wrapper.cpp +++ b/src/sundials_linsol_wrapper.cpp @@ -2,7 +2,6 @@ #include -#include // bad_alloc #include namespace amici { diff --git a/src/sundials_matrix_wrapper.cpp b/src/sundials_matrix_wrapper.cpp index b5c7300628..a86574cd82 100644 --- a/src/sundials_matrix_wrapper.cpp +++ b/src/sundials_matrix_wrapper.cpp @@ -793,20 +793,25 @@ unravel_index(sunindextype i, SUNMatrix m) { } if (mat_id == SUNMATRIX_SPARSE) { - gsl_ExpectsDebug(i < SM_NNZ_S(m)); - sunindextype row = SM_INDEXVALS_S(m)[i]; - sunindextype i_colptr = 0; - while (SM_INDEXPTRS_S(m)[i_colptr] < SM_NNZ_S(m)) { - if (SM_INDEXPTRS_S(m)[i_colptr + 1] > i) { - sunindextype col = i_colptr; - gsl_EnsuresDebug(row >= 0); - gsl_EnsuresDebug(row < SM_ROWS_S(m)); - gsl_EnsuresDebug(col >= 0); - gsl_EnsuresDebug(col < SM_COLUMNS_S(m)); - return {row, col}; - } - ++i_colptr; - } + auto nnz = SM_NNZ_S(m); + auto ncols = 
SM_COLUMNS_S(m); + auto index_vals = SM_INDEXVALS_S(m); + auto index_ptrs = SM_INDEXPTRS_S(m); + gsl_ExpectsDebug(i < nnz); + sunindextype row = index_vals[i]; + sunindextype col = 0; + while (col < ncols && index_ptrs[col + 1] <= i) + ++col; + + // This can happen if indexvals / indexptrs haven't been set. + if(col == ncols) + return {-1, -1}; + + gsl_EnsuresDebug(row >= 0); + gsl_EnsuresDebug(row < SM_ROWS_S(m)); + gsl_EnsuresDebug(col >= 0); + gsl_EnsuresDebug(col < ncols); + return {row, col}; } throw amici::AmiException("Unimplemented SUNMatrix type for unravel_index"); diff --git a/src/symbolic_functions.cpp b/src/symbolic_functions.cpp index 6c18d851b7..37e10fde62 100644 --- a/src/symbolic_functions.cpp +++ b/src/symbolic_functions.cpp @@ -12,7 +12,6 @@ #include #include #include -#include #if _MSC_VER && !__INTEL_COMPILER #include #define alloca _alloca diff --git a/swig/amici.i b/swig/amici.i index 9eac8e5046..3518b296fe 100644 --- a/swig/amici.i +++ b/swig/amici.i @@ -1,7 +1,7 @@ %define DOCSTRING """ Core C++ bindings ------------------ + This module encompasses the complete public C++ API of AMICI, which was exposed via swig. All functions listed here are directly accessible in the main amici package, i.e., :py:class:`amici.amici.ExpData` is available as @@ -16,6 +16,7 @@ nonstandard type conversions. // typemaps for docstrings %typemap(doctype) std::unique_ptr< amici::ExpData >::pointer "ExpData"; +%typemap(doctype) std::unique_ptr< amici::Model > "ModelPtr"; %typemap(doctype) std::unique_ptr< amici::Solver > "SolverPtr"; %typemap(doctype) std::vector< amici::realtype,std::allocator< amici::realtype > > "DoubleVector"; %typemap(doctype) std::vector< double,std::allocator< double > > "DoubleVector"; @@ -43,8 +44,8 @@ nonstandard type conversions. 
%typemap(doctype) amici::SteadyStateSensitivityMode "SteadyStateSensitivityMode"; %typemap(doctype) amici::realtype "float"; %typemap(doctype) DoubleVector "numpy.ndarray"; -%typemap(doctype) IntVector "List[int]"; -%typemap(doctype) std::pair< size_t,size_t > "Tuple[int, int]"; +%typemap(doctype) IntVector "list[int]"; +%typemap(doctype) std::pair< size_t,size_t > "tuple[int, int]"; %typemap(doctype) std::string "str"; %typemap(doctype) std::string const & "str"; %typemap(doctype) std::unique_ptr< amici::ExpData > "ExpData"; @@ -343,8 +344,19 @@ if sys.platform == 'win32' and (dll_dirs := os.environ.get('AMICI_DLL_DIRS')): // import additional types for typehints // also import np for use in __repr__ functions %pythonbegin %{ -from typing import TYPE_CHECKING, Iterable, List, Tuple, Sequence +from typing import TYPE_CHECKING, Iterable, Sequence import numpy as np if TYPE_CHECKING: import numpy %} + +%pythoncode %{ + + +__all__ = [ + x + for x in dir(sys.modules[__name__]) + if not x.startswith('_') + and x not in {"np", "sys", "os", "numpy", "IntEnum", "enum", "pi", "TYPE_CHECKING", "Iterable", "Sequence"} +] +%} diff --git a/swig/model.i b/swig/model.i index 3063590c21..ee3286e1a4 100644 --- a/swig/model.i +++ b/swig/model.i @@ -94,10 +94,21 @@ using namespace amici; %ignore fdx_rdatadx_solver; %ignore fdsigmaydy; +%newobject amici::Model::clone; +%extend amici::Model { +%pythoncode %{ +def __deepcopy__(self, memo): + return self.clone() +%} +}; - -%newobject amici::Model::clone; +%extend std::unique_ptr { +%pythoncode %{ +def __deepcopy__(self, memo): + return self.clone() +%} +}; // Process symbols in header %include "amici/model.h" diff --git a/swig/solver.i b/swig/solver.i index 992842c409..20641ba31f 100644 --- a/swig/solver.i +++ b/swig/solver.i @@ -113,9 +113,18 @@ def __repr__(self): %pythoncode %{ def __repr__(self): return _solver_repr(self) + +def __deepcopy__(self, memo): + return self.clone() %} }; +%extend amici::Solver { +%pythoncode %{ +def 
__deepcopy__(self, memo): + return self.clone() +%} +}; %newobject amici::Solver::clone; // Process symbols in header diff --git a/swig/std_unique_ptr.i b/swig/std_unique_ptr.i index 1063bd75b1..c44513bcee 100644 --- a/swig/std_unique_ptr.i +++ b/swig/std_unique_ptr.i @@ -6,8 +6,11 @@ namespace std { struct unique_ptr { typedef Type* pointer; + %apply SWIGTYPE *DISOWN { pointer Ptr }; explicit unique_ptr( pointer Ptr ); + %clear pointer Ptr; unique_ptr (unique_ptr&& Right); + template unique_ptr( unique_ptr&& Right ); unique_ptr( const unique_ptr& Right) = delete; diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py old mode 100755 new mode 100644 index 0b3c6d80e0..753c88e500 --- a/tests/benchmark-models/test_petab_benchmark.py +++ b/tests/benchmark-models/test_petab_benchmark.py @@ -1,30 +1,25 @@ """Tests for simulate_petab on PEtab benchmark problems.""" - +import os from pathlib import Path import amici -import amici.petab_import -import amici.petab_objective import numpy as np import pandas as pd import petab import pytest -from fiddy import MethodId, get_derivative -from fiddy.derivative_check import NumpyIsCloseDerivativeCheck -from fiddy.extensions.amici import simulate_petab_to_cached_functions -from fiddy.success import Consistency +from amici.petab.petab_import import import_petab_problem # Absolute and relative tolerances for finite difference gradient checks. 
ATOL: float = 1e-3 RTOL: float = 1e-2 -benchmark_path = ( - Path(__file__).parent.parent.parent - / "Benchmark-Models-PEtab" - / "Benchmark-Models" -) +repo_root = Path(__file__).parent.parent.parent +benchmark_path = repo_root / "Benchmark-Models-PEtab" / "Benchmark-Models" +if not benchmark_path.exists(): + benchmark_path = Path(os.environ["BENCHMARK_COLLECTION"]) + # reuse compiled models from test_benchmark_collection.sh -benchmark_outdir = Path(__file__).parent.parent.parent / "test_bmc" +benchmark_outdir = repo_root / "test_bmc" models = [ str(petab_path.stem) for petab_path in benchmark_path.glob("*") @@ -52,10 +47,19 @@ debug_path.mkdir(exist_ok=True, parents=True) +# until fiddy is updated +@pytest.mark.filterwarnings( + "ignore:Importing amici.petab_objective is deprecated.:DeprecationWarning" +) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log10") @pytest.mark.parametrize("scale", (True, False)) @pytest.mark.parametrize("model", models) def test_benchmark_gradient(model, scale): + from fiddy import MethodId, get_derivative + from fiddy.derivative_check import NumpyIsCloseDerivativeCheck + from fiddy.extensions.amici import simulate_petab_to_cached_functions + from fiddy.success import Consistency + if not scale and model in ( "Smith_BMCSystBiol2013", "Brannmark_JBC2010", @@ -81,7 +85,7 @@ def test_benchmark_gradient(model, scale): parameter_ids = list(parameter_df_free.index) # Setup AMICI objects. 
- amici_model = amici.petab_import.import_petab_problem( + amici_model = import_petab_problem( petab_problem, model_output_dir=benchmark_outdir / model, ) diff --git a/tests/benchmark-models/test_petab_model.py b/tests/benchmark-models/test_petab_model.py index cf85147535..8f52a341c4 100755 --- a/tests/benchmark-models/test_petab_model.py +++ b/tests/benchmark-models/test_petab_model.py @@ -9,6 +9,7 @@ import logging import os import sys +from pathlib import Path import amici import numpy as np @@ -16,7 +17,7 @@ import petab import yaml from amici.logging import get_logger -from amici.petab_objective import ( +from amici.petab.simulations import ( LLH, RDATAS, rdatas_to_measurement_df, @@ -100,7 +101,7 @@ def parse_cli_args(): def main(): """Simulate the model specified on the command line""" - + script_dir = Path(__file__).parent.absolute() args = parse_cli_args() loglevel = logging.DEBUG if args.verbose else logging.INFO logger.setLevel(loglevel) @@ -168,10 +169,7 @@ def main(): times["np"] = sum(problem.parameter_df[petab.ESTIMATE]) - pd.Series(times).to_csv( - f"./tests/benchmark-models/{args.model_name}_benchmark.csv" - ) - + pd.Series(times).to_csv(script_dir / f"{args.model_name}_benchmark.csv") for rdata in rdatas: assert ( rdata.status == amici.AMICI_SUCCESS @@ -201,9 +199,7 @@ def main(): ax.get_figure().savefig(fig_path, dpi=150) if args.check: - references_yaml = os.path.join( - os.path.dirname(__file__), "benchmark_models.yaml" - ) + references_yaml = script_dir / "benchmark_models.yaml" with open(references_yaml) as f: refs = yaml.full_load(f) diff --git a/tests/conftest.py b/tests/conftest.py index 9e90400518..9b7dd7fb08 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,10 +3,13 @@ import re import sys from pathlib import Path -from typing import List, Set, Tuple +from typing import TYPE_CHECKING import pytest +if TYPE_CHECKING: + from _pytest.reports import TestReport + # stores passed SBML semantic test suite IDs passed_ids = [] @@ -21,7 
+24,7 @@ def sbml_semantic_cases_dir() -> Path: return SBML_SEMANTIC_CASES_DIR -def parse_selection(selection_str: str, last: int) -> List[int]: +def parse_selection(selection_str: str, last: int) -> list[int]: """ Parse comma-separated list of integer ranges, return selected indices as integer list @@ -128,7 +131,7 @@ def pytest_runtest_logreport(report: "TestReport") -> None: passed_ids.append(test_case_id) -def get_tags_for_test(test_id: str) -> Tuple[Set[str], Set[str]]: +def get_tags_for_test(test_id: str) -> tuple[set[str], set[str]]: """Get sbml test suite tags for the given test ID Returns: diff --git a/tests/cpp/unittests/testExpData.cpp b/tests/cpp/unittests/testExpData.cpp index 416a41227b..d6e1a6fff2 100644 --- a/tests/cpp/unittests/testExpData.cpp +++ b/tests/cpp/unittests/testExpData.cpp @@ -4,8 +4,6 @@ #include #include -#include -#include #include #include @@ -49,6 +47,7 @@ class ExpDataTest : public ::testing::Test { nz, // nz nz, // nztrue nmaxevent, // ne + 0, // ne_solver 0, // nspl 0, // nJ 0, // nw diff --git a/tests/cpp/unittests/testMisc.cpp b/tests/cpp/unittests/testMisc.cpp index 80d2c3bc36..f18de96b79 100644 --- a/tests/cpp/unittests/testMisc.cpp +++ b/tests/cpp/unittests/testMisc.cpp @@ -65,6 +65,7 @@ class ModelTest : public ::testing::Test { nz, // nz nz, // nztrue nmaxevent, // ne + 0, // ne_solver 0, // nspl 0, // nJ 0, // nw @@ -303,6 +304,7 @@ class SolverTest : public ::testing::Test { nz, // nz nz, // nztrue ne, // ne + 0, // ne_solver 0, // nspl 0, // nJ 0, // nw @@ -687,7 +689,7 @@ TEST(UnravelIndex, UnravelIndexSunMatSparse) // [2, 0] // data [1, 2, 3] // colptrs [0, 2, 3] - // rowidxs [2, 3, 1] + // rowidxs [2, 3, 0] D.set_data(0, 0, 0); D.set_data(1, 0, 0); D.set_data(2, 0, 1); @@ -706,6 +708,16 @@ TEST(UnravelIndex, UnravelIndexSunMatSparse) SUNMatDestroy(S); } + +TEST(UnravelIndex, UnravelIndexSunMatSparseMissingIndices) +{ + // Sparse matrix without any indices set + SUNMatrixWrapper mat = SUNMatrixWrapper(2, 3, 2, 
CSC_MAT); + EXPECT_EQ(unravel_index(0, mat.get()), std::make_pair((sunindextype) -1, (sunindextype) -1)); + EXPECT_EQ(unravel_index(1, mat.get()), std::make_pair((sunindextype) -1, (sunindextype) -1)); +} + + TEST(ReturnCodeToStr, ReturnCodeToStr) { EXPECT_EQ("AMICI_SUCCESS", simulation_status_to_str(AMICI_SUCCESS)); diff --git a/tests/cpp/unittests/testSerialization.cpp b/tests/cpp/unittests/testSerialization.cpp index 5b4fb1ed2a..f59f04d9c7 100644 --- a/tests/cpp/unittests/testSerialization.cpp +++ b/tests/cpp/unittests/testSerialization.cpp @@ -5,7 +5,7 @@ #include "testfunctions.h" #include - +#include #include void @@ -142,6 +142,7 @@ TEST(ModelSerializationTest, ToFile) nz, // nz nz, // nztrue ne, // ne + 0, // ne_solver 0, // nspl 0, // nJ 9, // nw @@ -207,6 +208,7 @@ TEST(ReturnDataSerializationTest, ToString) nz, // nz nz, // nztrue ne, // ne + 0, // ne_solver 0, // nspl 0, // nJ 9, // nw diff --git a/tests/generateTestConfig/example_steadystate.py b/tests/generateTestConfig/example_steadystate.py index 07fab49dfe..80e8a776d2 100755 --- a/tests/generateTestConfig/example_steadystate.py +++ b/tests/generateTestConfig/example_steadystate.py @@ -2,7 +2,7 @@ import sys import numpy as np -from example import AmiciExample, dict2attrs +from example import AmiciExample class ExampleSteadystate(AmiciExample): diff --git a/tests/petab_test_suite/conftest.py b/tests/petab_test_suite/conftest.py index df0b00ee86..2e1c6d3cea 100644 --- a/tests/petab_test_suite/conftest.py +++ b/tests/petab_test_suite/conftest.py @@ -2,12 +2,11 @@ import re import sys -from typing import List from petabtests.core import get_cases -def parse_selection(selection_str: str) -> List[int]: +def parse_selection(selection_str: str) -> list[int]: """ Parse comma-separated list of integer ranges, return selected indices as integer list diff --git a/tests/petab_test_suite/test_petab_suite.py b/tests/petab_test_suite/test_petab_suite.py index 35ee3adcfc..0924c09576 100755 --- 
a/tests/petab_test_suite/test_petab_suite.py +++ b/tests/petab_test_suite/test_petab_suite.py @@ -13,12 +13,9 @@ from amici import SteadyStateSensitivityMode from amici.gradient_check import check_derivatives as amici_check_derivatives from amici.logging import get_logger, set_log_level -from amici.petab_import import import_petab_problem -from amici.petab_objective import ( - create_parameterized_edatas, - rdatas_to_measurement_df, - simulate_petab, -) +from amici.petab.conditions import create_parameterized_edatas +from amici.petab.petab_import import import_petab_problem +from amici.petab.simulations import rdatas_to_measurement_df, simulate_petab logger = get_logger(__name__, logging.DEBUG) set_log_level(get_logger("amici.petab_import"), logging.DEBUG) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..a61ee8a6e2 --- /dev/null +++ b/tox.ini @@ -0,0 +1,24 @@ +[tox] +env_list = + py311 +minversion = 4.11.3 +envlist = + doc + +[testenv] +passenv = AMICI_PARALLEL_COMPILE,CC,CXX + +[testenv:doc] +description = + Build documentation +deps = + -r documentation/rtd_requirements.txt + -r documentation/rtd_requirements2.txt +# don't install the package, this is already handled by `deps` above +skip_install = true +change_dir = documentation/ +allowlist_externals = + rm +commands = + rm -rf amici_models/ _doxyoutput_amici_cpp/ _exhale_cpp_api/ _exhale_matlab_api/ + sphinx-build -T -E -W --keep-going -b readthedocs -d _build/doctrees-readthedocs -D language=en . _build/html diff --git a/version.txt b/version.txt index 5a03fb737b..885415662f 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.20.0 +0.21.0