diff --git a/.github/release_workflow.md b/.github/release_workflow.md index a655f9fe..97fc65d6 100644 --- a/.github/release_workflow.md +++ b/.github/release_workflow.md @@ -11,6 +11,7 @@ To create a new release, follow these steps: - Increment the following: - The version number in the `pyproject.toml` and `CITATION.cff` files following CalVer versioning. - The `CHANGELOG.md` version with the changes for the new version. + - Add a new entry for the documentation site version switcher located at `docs/_static/switcher.json`. - Open a PR to the `main` branch. Once the PR is merged, proceed to the next step. 2. **Tag the Release:** diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index 63771150..704c7975 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -22,6 +22,11 @@ jobs: runs-on: [self-hosted, macOS, ARM64] if: github.repository == 'pybop-team/PyBOP' steps: + - name: Cleanup build folder + run: | + rm -rf ./* || true + rm -rf ./.??* || true + - uses: actions/checkout@v4 - name: Install python & create virtualenv diff --git a/.github/workflows/release_action.yaml b/.github/workflows/release_action.yaml index 0e4ee04d..499b4002 100644 --- a/.github/workflows/release_action.yaml +++ b/.github/workflows/release_action.yaml @@ -72,7 +72,7 @@ jobs: ./dist/*.tar.gz ./dist/*.whl - name: Publish artifacts and signatures to GitHub Releases - uses: softprops/action-gh-release@9d7c94cfd0a1f3ed45544c887983e9fa900f0564 # v2.0.4 + uses: softprops/action-gh-release@v2 with: # `dist/` contains the built packages, and the # sigstore-produced signatures and certificates. diff --git a/.github/workflows/scheduled_tests.yaml b/.github/workflows/scheduled_tests.yaml index ade88188..159152f0 100644 --- a/.github/workflows/scheduled_tests.yaml +++ b/.github/workflows/scheduled_tests.yaml @@ -113,6 +113,11 @@ jobs: matrix: ${{fromJson(needs.filter_pybamm_matrix.outputs.filtered_pybop_matrix)}} steps: + - name: Cleanup build folder + run: | + rm -rf ./* || true + rm -rf ./.??* || true + - uses: actions/checkout@v4 - name: Install python & create virtualenv shell: bash diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 401b5338..82299a92 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ ci: repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.4.8" + rev: "v0.5.1" hooks: - id: ruff args: [--fix, --show-fixes] diff --git a/CHANGELOG.md b/CHANGELOG.md index 715fc23e..b2cb56fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,10 +3,24 @@ ## Features +## Bug Fixes + + +## Breaking Changes + +# [v24.6](https://github.com/pybop-team/PyBOP/tree/v24.6) - 2024-07-08 +
+## Features
+
+- [#319](https://github.com/pybop-team/PyBOP/pull/319/) - Adds `CuckooSearch` optimiser with corresponding tests. +- [#359](https://github.com/pybop-team/PyBOP/pull/359/) - Aligns Inputs between the problem, observer and model. +- [#379](https://github.com/pybop-team/PyBOP/pull/379) - Adds model.simulateS1 to weekly benchmarks. +- [#174](https://github.com/pybop-team/PyBOP/issues/174) - Adds a new logo and updates the README for accessibility. - [#316](https://github.com/pybop-team/PyBOP/pull/316) - Adds Adam with weight decay (AdamW) optimiser, adds a deprecation warning for the pints.Adam implementation. - [#271](https://github.com/pybop-team/PyBOP/issues/271) - Aligns the output of the optimisers via a generalisation of the Result class.
- [#315](https://github.com/pybop-team/PyBOP/pull/315) - Updates __init__ structure to remove circular import issues and minimises dependency imports across the codebase for faster PyBOP module import. Adds type-hints to BaseModel and refactors rebuild parameter variables. - [#236](https://github.com/pybop-team/PyBOP/issues/236) - Restructures the optimiser classes, adds a new optimisation API through direct construction and keyword arguments, and fixes the setting of `max_iterations` and `_minimising`. Introduces `pybop.BaseOptimiser`, `pybop.BasePintsOptimiser`, and `pybop.BaseSciPyOptimiser` classes. +- [#322](https://github.com/pybop-team/PyBOP/pull/322) - Adds a `Parameters` class to store and access multiple parameters in one object. - [#321](https://github.com/pybop-team/PyBOP/pull/321) - Updates Prior classes with BaseClass, adds a `problem.sample_initial_conditions` method to improve stability of the SciPy.Minimize optimiser. - [#249](https://github.com/pybop-team/PyBOP/pull/249) - Adds the WeppnerHuggins model and a GITT example. - [#304](https://github.com/pybop-team/PyBOP/pull/304) - Decreases the testing suite completion time. @@ -24,7 +38,12 @@ ## Bug Fixes - +- [#393](https://github.com/pybop-team/PyBOP/pull/393) - General integration test fixes. Adds a UserWarning when using Plot2d with prior-generated bounds. +- [#338](https://github.com/pybop-team/PyBOP/pull/338) - Fixes the GaussianLogLikelihood class, adds integration tests, updates the non-bounded parameter implementation by applying bounds from priors and the `boundary_multiplier` argument. Bugfixes to CMAES construction. +- [#339](https://github.com/pybop-team/PyBOP/issues/339) - Updates the calculation of the cyclable lithium capacity in the spme_max_energy example. +- [#387](https://github.com/pybop-team/PyBOP/issues/387) - Adds keys to ParameterSet and updates the ECM OCV check. +- [#380](https://github.com/pybop-team/PyBOP/pull/380) - Restores the self._boundaries construction for `pybop.PSO`. +- [#372](https://github.com/pybop-team/PyBOP/pull/372) - Converts `np.array` to `np.asarray` for NumPy v2.0 support. - [#165](https://github.com/pybop-team/PyBOP/issues/165) - Stores the attempted and best parameter values and the best cost for each iteration in the log attribute of the optimiser and updates the associated plots. - [#354](https://github.com/pybop-team/PyBOP/issues/354) - Fixes the calculation of the gradient in the `RootMeanSquaredError` cost. - [#347](https://github.com/pybop-team/PyBOP/issues/347) - Resets options between MSMR tests to cope with a bug in PyBaMM v23.9 which is fixed in PyBaMM v24.1. @@ -38,6 +57,13 @@ - [#270](https://github.com/pybop-team/PyBOP/pull/270) - Updates PR template. - [#91](https://github.com/pybop-team/PyBOP/issues/91) - Adds a check on the number of parameters for CMAES and makes XNES the default optimiser. ## Breaking Changes + +- [#322](https://github.com/pybop-team/PyBOP/pull/322) - Adds a `Parameters` class to store and access multiple parameters in one object (API change). +- [#285](https://github.com/pybop-team/PyBOP/pull/285) - Drop support for Python 3.8. +- [#251](https://github.com/pybop-team/PyBOP/pull/251) - Drop support for PyBaMM v23.5. +- [#236](https://github.com/pybop-team/PyBOP/issues/236) - Restructures the optimiser classes (API change).
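To make the #322 `Parameters` change listed above concrete, here is a minimal sketch of grouping fit parameters into the new container. The constructor style and keyword names (`pybop.Parameter`, `prior=`, `bounds=`, `pybop.Gaussian`) are assumptions based on PyBOP usage of this era, not a verbatim excerpt from this PR; the parameter names and numeric values are placeholders.

```python
import pybop

# Sketch only: #322 groups individual pybop.Parameter objects into one
# Parameters container instead of passing a plain list (assumed API).
parameters = pybop.Parameters(
    pybop.Parameter(
        "Negative electrode active material volume fraction",
        prior=pybop.Gaussian(0.68, 0.05),  # placeholder prior
        bounds=[0.5, 0.8],                 # placeholder bounds
    ),
    pybop.Parameter(
        "Positive electrode active material volume fraction",
        prior=pybop.Gaussian(0.58, 0.05),
        bounds=[0.4, 0.7],
    ),
)
```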
+ # [v24.3.1](https://github.com/pybop-team/PyBOP/tree/v24.3.1) - 2024-06-17 ## Features diff --git a/CITATION.cff b/CITATION.cff index a14af062..b5c83816 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -11,5 +11,5 @@ authors: family-names: Courtier - given-names: David family-names: Howey -version: "24.3.1" # Update this when you release a new version +version: "24.6" # Update this when you release a new version repository-code: 'https://www.github.com/pybop-team/pybop' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0f3a7f07..727c5c51 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -70,7 +70,7 @@ You now have everything you need to start making changes! ### C. Merging your changes with PyBOP 10. [Test your code!](#testing) -12. If you added a major new feature, perhaps it should be showcased in an [example notebook](#example-notebooks). +12. If you added a major new feature, perhaps it should be showcased in an [example notebook](https://github.com/pybop-team/PyBOP/tree/develop/examples/notebooks). 13. If you've added new functionality, please add additional tests to ensure ample code coverage in PyBOP. 13. When you feel your code is finished, or at least warrants serious discussion, create a [pull request](https://help.github.com/articles/about-pull-requests/) (PR) on [PyBOP's GitHub page](https://github.com/pybop-team/PyBOP). 14. Once a PR has been created, it will be reviewed by any member of the community. Changes might be suggested which you can make by simply adding new commits to the branch. When everything's finished, someone with the right GitHub permissions will merge your changes into the PyBOP main repository. @@ -314,8 +314,6 @@ Configuration files: pyproject.toml ``` -Note that this file must be kept in sync with the version number in [pybop/**init**.py](https://github.com/pybop-team/PyBOP/blob/develop/pybop/__init__.py). - ### Continuous Integration using GitHub actions Each change pushed to the PyBOP GitHub repository will trigger the test and benchmark suites to be run, using [GitHub actions](https://github.com/features/actions). diff --git a/README.md b/README.md index 8fd09c0a..99f7a031 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@
- ![logo](https://raw.githubusercontent.com/pybop-team/PyBOP/develop/assets/Temp_Logo.png) + [HTML `<img>` block for the new SVG logo (alt text: logo.svg); markup not preserved in extraction] + # Python Battery Optimisation and Parameterisation @@ -25,8 +26,7 @@ PyBOP provides a complete set of tools for parameterisation and optimisation of The diagram below shows the conceptual framework of PyBOP. This package is currently under development, so users can expect the API to evolve with future releases.

- [HTML `<img>` markup for the architecture diagram, pybop_arch.svg] + [updated HTML `<img>` markup for the architecture diagram, pybop_arch.svg; tags not preserved in extraction]

## Installation diff --git a/assets/PyBOP-high-level.svg b/assets/PyBOP-high-level.svg index 00f3428f..25de26fd 100644 --- a/assets/PyBOP-high-level.svg +++ b/assets/PyBOP-high-level.svg @@ -1,2906 +1,4149 @@ image/svg+xml [redrawn SVG diagram; markup not reproduced. Recoverable text: axes "Negative electrode thickness [m]" and "Positive electrode thickness [m]" (ticks 80μ–160μ), a "Volumetric energy density [Wh.m-3] vs. electrode thicknesses" plot (ticks 100k–900k) with "Transport limited" and "Goldilocks region" annotations, and the labels "Parameter Identification", "Design Optimisation", "Experimental Data", "inverse modelling", "forward models", "Physics and Empirical Models", "Funding / Friend Projects", "parameters", "simulations"] diff --git a/assets/Temp_Logo.png b/assets/Temp_Logo.png deleted file mode 100644 index 4ef2853b..00000000 Binary files a/assets/Temp_Logo.png and /dev/null differ diff --git a/assets/logo/PyBOP_logo_flat.png b/assets/logo/PyBOP_logo_flat.png new file mode 100644 index 00000000..8e0fb139 Binary files /dev/null and b/assets/logo/PyBOP_logo_flat.png differ diff --git a/assets/logo/PyBOP_logo_flat.svg b/assets/logo/PyBOP_logo_flat.svg new file mode 100644 index 00000000..64fdafe1 --- /dev/null +++ b/assets/logo/PyBOP_logo_flat.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_flat_inverse.png b/assets/logo/PyBOP_logo_flat_inverse.png new file mode 100644 index 00000000..f8aa5817 Binary files /dev/null and b/assets/logo/PyBOP_logo_flat_inverse.png differ diff --git a/assets/logo/PyBOP_logo_flat_inverse.svg b/assets/logo/PyBOP_logo_flat_inverse.svg new file mode 100644 index 00000000..d8c3e651 --- /dev/null +++ b/assets/logo/PyBOP_logo_flat_inverse.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_inverse.png b/assets/logo/PyBOP_logo_inverse.png new file mode 100644 index 00000000..62a4cf7d Binary files /dev/null and b/assets/logo/PyBOP_logo_inverse.png differ diff --git a/assets/logo/PyBOP_logo_inverse.svg b/assets/logo/PyBOP_logo_inverse.svg new file mode 100644 index 00000000..e4ced905 --- /dev/null +++ b/assets/logo/PyBOP_logo_inverse.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark.png b/assets/logo/PyBOP_logo_mark.png new file mode 100644 index 00000000..6fce2b94 Binary files /dev/null and b/assets/logo/PyBOP_logo_mark.png differ diff --git a/assets/logo/PyBOP_logo_mark.svg b/assets/logo/PyBOP_logo_mark.svg new file mode 100644 index 00000000..559ef406 --- /dev/null +++ b/assets/logo/PyBOP_logo_mark.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark_circle.png b/assets/logo/PyBOP_logo_mark_circle.png new file mode 100644 index 00000000..adc77e6b Binary files /dev/null and b/assets/logo/PyBOP_logo_mark_circle.png differ diff --git a/assets/logo/PyBOP_logo_mark_circle.svg b/assets/logo/PyBOP_logo_mark_circle.svg new file mode 100644 index 00000000..1f4ba833 --- /dev/null +++ b/assets/logo/PyBOP_logo_mark_circle.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark_mono.png b/assets/logo/PyBOP_logo_mark_mono.png new file mode 100644 index 00000000..bec070d0 Binary files /dev/null and b/assets/logo/PyBOP_logo_mark_mono.png differ diff --git a/assets/logo/PyBOP_logo_mark_mono.svg b/assets/logo/PyBOP_logo_mark_mono.svg new file mode 100644 index 00000000..204f6e2f ---
/dev/null +++ b/assets/logo/PyBOP_logo_mark_mono.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark_mono_inverse.png b/assets/logo/PyBOP_logo_mark_mono_inverse.png new file mode 100644 index 00000000..3d4cc15f Binary files /dev/null and b/assets/logo/PyBOP_logo_mark_mono_inverse.png differ diff --git a/assets/logo/PyBOP_logo_mark_mono_inverse.svg b/assets/logo/PyBOP_logo_mark_mono_inverse.svg new file mode 100644 index 00000000..ab859856 --- /dev/null +++ b/assets/logo/PyBOP_logo_mark_mono_inverse.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mono.png b/assets/logo/PyBOP_logo_mono.png new file mode 100644 index 00000000..af9be830 Binary files /dev/null and b/assets/logo/PyBOP_logo_mono.png differ diff --git a/assets/logo/PyBOP_logo_mono.svg b/assets/logo/PyBOP_logo_mono.svg new file mode 100644 index 00000000..b135967e --- /dev/null +++ b/assets/logo/PyBOP_logo_mono.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mono_inverse.png b/assets/logo/PyBOP_logo_mono_inverse.png new file mode 100644 index 00000000..cc9030c6 Binary files /dev/null and b/assets/logo/PyBOP_logo_mono_inverse.png differ diff --git a/assets/logo/PyBOP_logo_mono_inverse.svg b/assets/logo/PyBOP_logo_mono_inverse.svg new file mode 100644 index 00000000..830bea1a --- /dev/null +++ b/assets/logo/PyBOP_logo_mono_inverse.svg @@ -0,0 +1 @@ + diff --git a/benchmarks/benchmark_model.py b/benchmarks/benchmark_model.py index df0335c2..843b03bc 100644 --- a/benchmarks/benchmark_model.py +++ b/benchmarks/benchmark_model.py @@ -81,3 +81,13 @@ def time_model_simulate(self, model, parameter_set): parameter_set (str): The name of the parameter set being used. """ self.problem._model.simulate(inputs=self.inputs, t_eval=self.t_eval) + + def time_model_simulateS1(self, model, parameter_set): + """ + Benchmark the simulateS1 method of the model. + + Args: + model (pybop.Model): The model class being benchmarked. + parameter_set (str): The name of the parameter set being used. + """ + self.problem._model.simulateS1(inputs=self.inputs, t_eval=self.t_eval) diff --git a/docs/_static/switcher.json b/docs/_static/switcher.json index 5db09bc1..2847bc26 100644 --- a/docs/_static/switcher.json +++ b/docs/_static/switcher.json @@ -4,9 +4,19 @@ "url": "https://pybop-docs.readthedocs.io/en/latest/" }, { - "name": "v23.12 (stable)", - "version": "v23.12", - "url": "https://pybop-docs.readthedocs.io/en/v23.12/", + "name": "v24.6 (stable)", + "version": "v24.6", + "url": "https://pybop-docs.readthedocs.io/en/v24.6/", "preferred": true + }, + { + "name": "v24.3", + "version": "v24.3", + "url": "https://pybop-docs.readthedocs.io/en/v24.3/" + }, + { + "name": "v23.12", + "version": "v23.12", + "url": "https://pybop-docs.readthedocs.io/en/v23.12/" } ] diff --git a/docs/_templates/autoapi/index.rst b/docs/_templates/autoapi/index.rst index d6075995..7cc11171 100644 --- a/docs/_templates/autoapi/index.rst +++ b/docs/_templates/autoapi/index.rst @@ -15,4 +15,6 @@ This page contains auto-generated API reference documentation [#f1]_. {% endif %} {% endfor %} + pybop/index + .. 
[#f1] Created with `sphinx-autoapi `_ diff --git a/docs/conf.py b/docs/conf.py index cfc37a90..df93a8fa 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -39,7 +39,7 @@ # -- Options for autoapi ------------------------------------------------------- autoapi_type = "python" autoapi_dirs = ["../pybop"] -autoapi_keep_files = True +autoapi_keep_files = False autoapi_root = "api" autoapi_member_order = "groupwise" diff --git a/docs/index.md b/docs/index.md index d45182ae..4d562165 100644 --- a/docs/index.md +++ b/docs/index.md @@ -8,7 +8,7 @@ html_theme.sidebar_secondary.remove: true # PyBOP: Optimise and Parameterise Battery Models -Welcome to PyBOP, a Python package dedicated to the optimization and parameterization of battery models. PyBOP is designed to streamline your workflow, whether you are conducting academic research, working in industry, or simply interested in battery technology and modelling. +Welcome to PyBOP, a Python package dedicated to the optimisation and parameterisation of battery models. PyBOP is designed to streamline your workflow, whether you are conducting academic research, working in industry, or simply interested in battery technology and modelling. ```{gallery-grid} :grid-columns: 1 2 2 2 diff --git a/docs/installation.rst b/docs/installation.rst index 3c0080c1..6bc3169c 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation ***************************** -PyBOP is a versatile Python package designed for optimization and parameterization of battery models. Follow the instructions below to install PyBOP and set up your environment to begin utilizing its capabilities. +PyBOP is a versatile Python package designed for optimisation and parameterisation of battery models. Follow the instructions below to install PyBOP and set up your environment to begin utilising its capabilities. Installing PyBOP with pip ------------------------- @@ -55,7 +55,7 @@ To verify that PyBOP has been installed successfully, try running one of the pro For Developers -------------- -If you are installing PyBOP for development purposes, such as contributing to the project, please ensure that you follow the guidelines outlined in the `Contributing Guide <../Contributing.html>`_. It includes additional steps that might be necessary for setting up a development environment, including the installation of dependencies and setup of pre-commit hooks. +If you are installing PyBOP for development purposes, such as contributing to the project, please ensure that you follow the guidelines outlined in the `Contributing Guide `_. It includes additional steps that might be necessary for setting up a development environment, including the installation of dependencies and setup of pre-commit hooks. Further Assistance ------------------ @@ -68,4 +68,4 @@ Next Steps After installing PyBOP, you might want to: * Explore the `Quick Start Guide `_ to begin using PyBOP. -* Check out the `API Reference <../api/index.html>`_ for detailed information on PyBOP's programming interface. +* Check out the `API Reference `_ for detailed information on PyBOP's programming interface. diff --git a/docs/quick_start.rst b/docs/quick_start.rst index 683c82b4..1a1617bd 100644 --- a/docs/quick_start.rst +++ b/docs/quick_start.rst @@ -6,7 +6,7 @@ Welcome to the Quick Start Guide for PyBOP. 
This guide will help you get up and Getting Started with PyBOP -------------------------- -PyBOP is equipped with a series of robust tools that can help you optimize various parameters within your battery models to better match empirical data or to explore the effects of different parameters on battery behavior. +PyBOP is equipped with a series of robust tools that can help you optimise various parameters within your battery models to better match empirical data or to explore the effects of different parameters on battery behavior. To begin using PyBOP: @@ -24,14 +24,14 @@ To begin using PyBOP: import pybop - Now you're ready to utilize PyBOP's functionality in your projects! + Now you're ready to utilise PyBOP's functionality in your projects! Exploring Examples ------------------ To help you get acquainted with PyBOP's capabilities, we provide a collection of examples that demonstrate common use cases and features of the package: -- **Jupyter Notebooks**: Interactive notebooks that include detailed explanations alongside the live code, visualizations, and results. These are an excellent resource for learning and can be easily modified and executed to suit your needs. +- **Jupyter Notebooks**: Interactive notebooks that include detailed explanations alongside the live code, visualisations, and results. These are an excellent resource for learning and can be easily modified and executed to suit your needs. - **Python Scripts**: For those who prefer working in a text editor, IDE, or for integrating into larger projects, we provide equivalent examples in plain Python script format. @@ -55,4 +55,4 @@ If you encounter any issues or have questions as you start using PyBOP, don't he - **GitHub Issues**: Report bugs or request new features by opening an `Issue `_ - **GitHub Discussions**: Post your questions or feedback on our `GitHub Discussions `_ -- **Contributions**: Interested in contributing to PyBOP? Check out our `Contributing Guide <../Contributing.html>`_ for guidelines. +- **Contributions**: Interested in contributing to PyBOP? Check out our `Contributing Guide `_ for guidelines. 
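Before the notebook diffs that follow, here is an end-to-end sketch of the workflow the Quick Start docs describe. The calls `optim.run()` returning `x, final_cost` and `pybop.quick_plot(problem, problem_inputs=x)` appear verbatim in this PR; the dataset and problem setup (`pybop.Dataset`, `pybop.FittingProblem`, `pybop.SumSquaredError`, the parameter definitions) are assumptions based on PyBOP examples of this era, with placeholder priors and bounds.

```python
import numpy as np
import pybop

# Forward-simulate a single particle model to create synthetic data (assumed setup).
model = pybop.lithium_ion.SPM(parameter_set=pybop.ParameterSet.pybamm("Chen2020"))
t_eval = np.arange(0, 900, 3)
values = model.predict(t_eval=t_eval)

dataset = pybop.Dataset(
    {
        "Time [s]": t_eval,
        "Current function [A]": values["Current [A]"].data,
        "Voltage [V]": values["Voltage [V]"].data,
    }
)

# One fitting parameter; prior and bounds are illustrative placeholders.
parameters = pybop.Parameters(
    pybop.Parameter(
        "Negative electrode active material volume fraction",
        prior=pybop.Gaussian(0.68, 0.05),
        bounds=[0.5, 0.8],
    )
)

problem = pybop.FittingProblem(model, parameters, dataset)
cost = pybop.SumSquaredError(problem)
optim = pybop.XNES(cost, max_iterations=100)

x, final_cost = optim.run()                  # pattern used throughout the notebooks below
pybop.quick_plot(problem, problem_inputs=x)  # keyword renamed from parameter_values in this PR
```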
diff --git a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb index 365eb6e1..9fd084dd 100644 --- a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb +++ b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb @@ -1641,7 +1641,7 @@ "source": [ "optim = pybop.PSO(cost, max_unchanged_iterations=55, threshold=1e-6)\n", "x, final_cost = optim.run()\n", - "print(\"Initial parameters:\", cost.x0)\n", + "print(\"Initial parameters:\", optim.x0)\n", "print(\"Estimated parameters:\", x)" ] }, @@ -1679,7 +1679,7 @@ } ], "source": [ - "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { @@ -1850,7 +1850,7 @@ } ], "source": [ - "pybop.quick_plot(problem, parameter_values=x, title=\"Parameter Extrapolation\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Parameter Extrapolation\");" ] }, { diff --git a/examples/notebooks/equivalent_circuit_identification.ipynb b/examples/notebooks/equivalent_circuit_identification.ipynb index 8a13a199..6184c191 100644 --- a/examples/notebooks/equivalent_circuit_identification.ipynb +++ b/examples/notebooks/equivalent_circuit_identification.ipynb @@ -419,7 +419,7 @@ "source": [ "optim = pybop.CMAES(cost, max_iterations=300)\n", "x, final_cost = optim.run()\n", - "print(\"Initial parameters:\", cost.x0)\n", + "print(\"Initial parameters:\", optim.x0)\n", "print(\"Estimated parameters:\", x)" ] }, @@ -457,7 +457,7 @@ } ], "source": [ - "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { diff --git a/examples/notebooks/multi_model_identification.ipynb b/examples/notebooks/multi_model_identification.ipynb index 699b2eda..a66a78f2 100644 --- a/examples/notebooks/multi_model_identification.ipynb +++ b/examples/notebooks/multi_model_identification.ipynb @@ -3905,7 +3905,7 @@ "source": [ "for optim, x in zip(optims, xs):\n", " pybop.quick_plot(\n", - " optim.cost.problem, parameter_values=x, title=optim.cost.problem.model.name\n", + " optim.cost.problem, problem_inputs=x, title=optim.cost.problem.model.name\n", " )" ] }, @@ -3958,7 +3958,7 @@ } ], "source": [ - "bounds = np.array([[5.5e-05, 8e-05], [7.5e-05, 9e-05]])\n", + "bounds = np.asarray([[5.5e-05, 8e-05], [7.5e-05, 9e-05]])\n", "for optim in optims:\n", " pybop.plot2d(optim, bounds=bounds, steps=10, title=optim.cost.problem.model.name)" ] diff --git a/examples/notebooks/multi_optimiser_identification.ipynb b/examples/notebooks/multi_optimiser_identification.ipynb index f85b2609..c4fc3b92 100644 --- a/examples/notebooks/multi_optimiser_identification.ipynb +++ b/examples/notebooks/multi_optimiser_identification.ipynb @@ -36,27 +36,42 @@ "name": "stdout", "output_type": "stream", "text": [ - "Requirement already satisfied: pip in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (24.0)\n", - "Requirement already satisfied: ipywidgets in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (8.1.2)\n", - "Requirement already satisfied: comm>=0.1.3 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (0.2.2)\n", - "Requirement already satisfied: ipython>=6.1.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) 
(8.23.0)\n", - "Requirement already satisfied: traitlets>=4.3.1 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (5.14.2)\n", - "Requirement already satisfied: widgetsnbextension~=4.0.10 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (4.0.10)\n", - "Requirement already satisfied: jupyterlab-widgets~=3.0.10 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipywidgets) (3.0.10)\n", - "Requirement already satisfied: decorator in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (5.1.1)\n", - "Requirement already satisfied: jedi>=0.16 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.19.1)\n", - "Requirement already satisfied: matplotlib-inline in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.1.6)\n", - "Requirement already satisfied: prompt-toolkit<3.1.0,>=3.0.41 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (3.0.43)\n", - "Requirement already satisfied: pygments>=2.4.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (2.17.2)\n", - "Requirement already satisfied: stack-data in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.6.3)\n", - "Requirement already satisfied: pexpect>4.3 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (4.9.0)\n", - "Requirement already satisfied: parso<0.9.0,>=0.8.3 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets) (0.8.4)\n", - "Requirement already satisfied: ptyprocess>=0.5 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets) (0.7.0)\n", - "Requirement already satisfied: wcwidth in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from prompt-toolkit<3.1.0,>=3.0.41->ipython>=6.1.0->ipywidgets) (0.2.13)\n", - "Requirement already satisfied: executing>=1.2.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.0.1)\n", - "Requirement already satisfied: asttokens>=2.1.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.4.1)\n", - "Requirement already satisfied: pure-eval in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (0.2.2)\n", - "Requirement already satisfied: six>=1.12.0 in /Users/engs2510/.pyenv/versions/3.12.2/envs/pybop-3.12/lib/python3.12/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets) (1.16.0)\n", + "Requirement already satisfied: pip in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (24.0)\n", + "Collecting pip\n", + " Using cached pip-24.1.1-py3-none-any.whl.metadata (3.6 kB)\n", + "Collecting ipywidgets\n", + " Using cached ipywidgets-8.1.3-py3-none-any.whl.metadata (2.4 kB)\n", + "Requirement 
already satisfied: comm>=0.1.3 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipywidgets) (0.2.2)\n", + "Requirement already satisfied: ipython>=6.1.0 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipywidgets) (8.26.0)\n", + "Requirement already satisfied: traitlets>=4.3.1 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipywidgets) (5.14.3)\n", + "Collecting widgetsnbextension~=4.0.11 (from ipywidgets)\n", + " Using cached widgetsnbextension-4.0.11-py3-none-any.whl.metadata (1.6 kB)\n", + "Collecting jupyterlab-widgets~=3.0.11 (from ipywidgets)\n", + " Using cached jupyterlab_widgets-3.0.11-py3-none-any.whl.metadata (4.1 kB)\n", + "Requirement already satisfied: decorator in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (5.1.1)\n", + "Requirement already satisfied: jedi>=0.16 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.19.1)\n", + "Requirement already satisfied: matplotlib-inline in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.1.7)\n", + "Requirement already satisfied: prompt-toolkit<3.1.0,>=3.0.41 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (3.0.47)\n", + "Requirement already satisfied: pygments>=2.4.0 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (2.18.0)\n", + "Requirement already satisfied: stack-data in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.6.3)\n", + "Requirement already satisfied: pexpect>4.3 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (4.9.0)\n", + "Requirement already satisfied: parso<0.9.0,>=0.8.3 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets) (0.8.4)\n", + "Requirement already satisfied: ptyprocess>=0.5 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets) (0.7.0)\n", + "Requirement already satisfied: wcwidth in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from prompt-toolkit<3.1.0,>=3.0.41->ipython>=6.1.0->ipywidgets) (0.2.13)\n", + "Requirement already satisfied: executing>=1.2.0 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.0.1)\n", + "Requirement already satisfied: asttokens>=2.1.0 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.4.1)\n", + "Requirement already satisfied: pure-eval in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (0.2.2)\n", + "Requirement already satisfied: six>=1.12.0 in /home/engs2510/.pyenv/versions/3.12.2/envs/pybop/lib/python3.12/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets) (1.16.0)\n", + "Using cached pip-24.1.1-py3-none-any.whl (1.8 MB)\n", + "Using cached ipywidgets-8.1.3-py3-none-any.whl (139 kB)\n", + "Using cached jupyterlab_widgets-3.0.11-py3-none-any.whl (214 kB)\n", + "Using cached 
widgetsnbextension-4.0.11-py3-none-any.whl (2.3 MB)\n", + "Installing collected packages: widgetsnbextension, pip, jupyterlab-widgets, ipywidgets\n", + " Attempting uninstall: pip\n", + " Found existing installation: pip 24.0\n", + " Uninstalling pip-24.0:\n", + " Successfully uninstalled pip-24.0\n", + "Successfully installed ipywidgets-8.1.3 jupyterlab-widgets-3.0.11 pip-24.1.1 widgetsnbextension-4.0.11\n", "Note: you may need to restart the kernel to use updated packages.\n", "Note: you may need to restart the kernel to use updated packages.\n" ] @@ -307,6 +322,7 @@ " pybop.PSO,\n", " pybop.XNES,\n", " pybop.NelderMead,\n", + " pybop.CuckooSearch,\n", "]\n", "\n", "scipy_optimisers = [\n", @@ -346,7 +362,147 @@ "Running AdamW\n", "NOTE: Boundaries ignored by AdamW\n", "Running GradientDescent\n", - "NOTE: Boundaries ignored by Gradient Descent\n", + "NOTE: Boundaries ignored by \n", + "Error: Error in Function::call for 'event_0' [MXFunction] at .../casadi/core/function.cpp:361:\n", + ".../casadi/core/function_internal.hpp:1649: Input 1 (i1) has mismatching shape. Got 100-by-1. Allowed dimensions, in general, are:\n", + " - The input dimension N-by-M (here 300-by-1)\n", + " - A scalar, i.e. 1-by-1\n", + " - M-by-N if N=1 or M=1 (i.e. a transposed vector)\n", + " - N-by-M1 if K*M1=M for some K (argument repeated horizontally)\n", + " - N-by-P*M, indicating evaluation with multiple arguments (P must be a multiple of 1 for consistency with previous inputs)\n", + "Error: Error in Function::call for 'event_0' [MXFunction] at .../casadi/core/function.cpp:361:\n", + ".../casadi/core/function_internal.hpp:1649: Input 1 (i1) has mismatching shape. Got 100-by-1. Allowed dimensions, in general, are:\n", + " - The input dimension N-by-M (here 300-by-1)\n", + " - A scalar, i.e. 1-by-1\n", + " - M-by-N if N=1 or M=1 (i.e. a transposed vector)\n", + " - N-by-M1 if K*M1=M for some K (argument repeated horizontally)\n", + " - N-by-P*M, indicating evaluation with multiple arguments (P must be a multiple of 1 for consistency with previous inputs)\n", + "Error: Error in Function::call for 'event_0' [MXFunction] at .../casadi/core/function.cpp:361:\n", + ".../casadi/core/function_internal.hpp:1649: Input 1 (i1) has mismatching shape. Got 100-by-1. Allowed dimensions, in general, are:\n", + " - The input dimension N-by-M (here 300-by-1)\n", + " - A scalar, i.e. 1-by-1\n", + " - M-by-N if N=1 or M=1 (i.e. a transposed vector)\n", + " - N-by-M1 if K*M1=M for some K (argument repeated horizontally)\n", + " - N-by-P*M, indicating evaluation with multiple arguments (P must be a multiple of 1 for consistency with previous inputs)\n", + "Error: Error in Function::call for 'event_0' [MXFunction] at .../casadi/core/function.cpp:361:\n", + ".../casadi/core/function_internal.hpp:1649: Input 1 (i1) has mismatching shape. Got 100-by-1. Allowed dimensions, in general, are:\n", + " - The input dimension N-by-M (here 300-by-1)\n", + " - A scalar, i.e. 1-by-1\n", + " - M-by-N if N=1 or M=1 (i.e. a transposed vector)\n", + " - N-by-M1 if K*M1=M for some K (argument repeated horizontally)\n", + " - N-by-P*M, indicating evaluation with multiple arguments (P must be a multiple of 1 for consistency with previous inputs)\n", + "Error: Error in Function::call for 'event_0' [MXFunction] at .../casadi/core/function.cpp:361:\n", + ".../casadi/core/function_internal.hpp:1649: Input 1 (i1) has mismatching shape. Got 100-by-1. 
Allowed dimensions, in general, are:\n", + " - The input dimension N-by-M (here 300-by-1)\n", + " - A scalar, i.e. 1-by-1\n", + " - M-by-N if N=1 or M=1 (i.e. a transposed vector)\n", + " - N-by-M1 if K*M1=M for some K (argument repeated horizontally)\n", + " - N-by-P*M, indicating evaluation with multiple arguments (P must be a multiple of 1 for consistency with previous inputs)\n", [the same CasADi shape-mismatch error block is repeated verbatim for each evaluation; duplicate blocks omitted] + "Error: Error in Function::call for 'event_0' [MXFunction] at .../casadi/core/function.cpp:361:\n", + ".../casadi/core/function_internal.hpp:1649: Input 1 (i1) has mismatching shape. Got 100-by-1. Allowed dimensions, in general, are:\n", + " - The input dimension N-by-M (here 300-by-1)\n", + " - A scalar, i.e. 1-by-1\n", + " - M-by-N if N=1 or M=1 (i.e. 
a transposed vector)\n", + " - N-by-M1 if K*M1=M for some K (argument repeated horizontally)\n", + " - N-by-P*M, indicating evaluation with multiple arguments (P must be a multiple of 1 for consistency with previous inputs)\n", "Running IRPropMin\n" ] } @@ -378,7 +534,8 @@ "Running PSO\n", "Running XNES\n", "Running NelderMead\n", - "NOTE: Boundaries ignored by NelderMead\n" + "NOTE: Boundaries ignored by \n", + "Running CuckooSearch\n" ] } ], @@ -441,16 +598,17 @@ "name": "stdout", "output_type": "stream", "text": [ - "| Optimiser: AdamW | Results: [0.80186169 0.66943058] |\n", - "| Optimiser: Gradient descent | Results: [0.44491146 1.59642543] |\n", - "| Optimiser: iRprop- | Results: [0.8 0.66516386] |\n", - "| Optimiser: Covariance Matrix Adaptation Evolution Strategy (CMA-ES) | Results: [0.7999994 0.66516056] |\n", - "| Optimiser: Seperable Natural Evolution Strategy (SNES) | Results: [0.79672265 0.66566242] |\n", - "| Optimiser: Particle Swarm Optimisation (PSO) | Results: [0.79978922 0.66557426] |\n", - "| Optimiser: Exponential Natural Evolution Strategy (xNES) | Results: [0.79992605 0.66513294] |\n", - "| Optimiser: Nelder-Mead | Results: [0.81389091 0.66318217] |\n", - "| Optimiser: SciPyMinimize | Results: [0.63594266 0.7 ] |\n", - "| Optimiser: SciPyDifferentialEvolution | Results: [0.79999973 0.6651644 ] |\n" + "| Optimiser: AdamW | Results: [0.79283046 0.66146761] |\n", + "| Optimiser: Gradient descent | Results: [0.54971799 0.92691691] |\n", + "| Optimiser: iRprop- | Results: [0.72245096 0.67281911] |\n", + "| Optimiser: Covariance Matrix Adaptation Evolution Strategy (CMA-ES) | Results: [0.72099365 0.67312846] |\n", + "| Optimiser: Seperable Natural Evolution Strategy (SNES) | Results: [0.72092695 0.67313321] |\n", + "| Optimiser: Particle Swarm Optimisation (PSO) | Results: [0.71681934 0.67366943] |\n", + "| Optimiser: Exponential Natural Evolution Strategy (xNES) | Results: [0.71352763 0.67470134] |\n", + "| Optimiser: Nelder-Mead | Results: [0.72127038 0.67308243] |\n", + "| Optimiser: Cuckoo Search | Results: [0.70772893 0.67571981] |\n", + "| Optimiser: SciPyMinimize | Results: [0.62747952 0.7 ] |\n", + "| Optimiser: SciPyDifferentialEvolution | Results: [0.72100138 0.67312735] |\n" ] } ], @@ -509,7 +667,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelAdamWTime / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelAdamWTime / sVoltage / V" ] }, "metadata": {}, @@ -518,7 +676,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelGradient descentTime / sVoltage / V" + "05001000150020003.53.63.73.83.944.1ReferenceModelGradient descentTime / sVoltage / V" ] }, "metadata": {}, @@ -527,7 +685,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModeliRprop-Time / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModeliRprop-Time / sVoltage / V" ] }, "metadata": {}, @@ -536,7 +694,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelCovariance Matrix Adaptation Evolution Strategy (CMA-ES)Time / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelCovariance Matrix Adaptation Evolution Strategy (CMA-ES)Time / sVoltage / V" ] }, "metadata": {}, @@ -545,7 +703,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelSeperable Natural Evolution Strategy (SNES)Time / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelSeperable Natural Evolution Strategy (SNES)Time / 
sVoltage / V" ] }, "metadata": {}, @@ -554,7 +712,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelParticle Swarm Optimisation (PSO)Time / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelParticle Swarm Optimisation (PSO)Time / sVoltage / V" ] }, "metadata": {}, @@ -563,7 +721,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelExponential Natural Evolution Strategy (xNES)Time / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelExponential Natural Evolution Strategy (xNES)Time / sVoltage / V" ] }, "metadata": {}, @@ -572,7 +730,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelNelder-MeadTime / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelNelder-MeadTime / sVoltage / V" ] }, "metadata": {}, @@ -581,7 +739,7 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelSciPyMinimizeTime / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelCuckoo SearchTime / sVoltage / V" ] }, "metadata": {}, @@ -590,7 +748,16 @@ { "data": { "image/svg+xml": [ - "02004006008003.753.83.853.93.9544.05ReferenceModelSciPyDifferentialEvolutionTime / sVoltage / V" + "05001000150020003.53.63.73.83.94ReferenceModelSciPyMinimizeTime / sVoltage / V" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "05001000150020003.53.63.73.83.94ReferenceModelSciPyDifferentialEvolutionTime / sVoltage / V" ] }, "metadata": {}, @@ -599,7 +766,7 @@ ], "source": [ "for optim, x in zip(optims, xs):\n", - " pybop.quick_plot(optim.cost.problem, parameter_values=x, title=optim.name())" + " pybop.quick_plot(optim.cost.problem, problem_inputs=x, title=optim.name())" ] }, { @@ -627,7 +794,16 @@ { "data": { "image/svg+xml": [ - "51015202530012345AdamWIterationCost" + "5101520253000.511.522.533.5AdamWIterationCost" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "1020300.60.650.70.750.80.851020300.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" ] }, "metadata": {}, @@ -636,7 +812,7 @@ { "data": { "image/svg+xml": [ - "05101520250.60.650.70.750.80.8505101520250.450.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" + "510152011.522.533.5Gradient descentIterationCost" ] }, "metadata": {}, @@ -645,7 +821,7 @@ { "data": { "image/svg+xml": [ - "510152024681012141618Gradient descentIterationCost" + "510152000.10.20.30.40.50.60.70.851015200.40.50.60.70.80.911.11.21.3Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" ] }, "metadata": {}, @@ -654,7 +830,7 @@ { "data": { "image/svg+xml": [ - "0510150510152025300510150.511.522.5Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode 
active material volume fraction" + "10203040506000.511.522.533.5iRprop-IterationCost" ] }, "metadata": {}, @@ -663,7 +839,7 @@ { "data": { "image/svg+xml": [ - "10203040012345iRprop-IterationCost" + "2040600.650.70.750.82040600.50.550.60.65Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" ] }, "metadata": {}, @@ -672,7 +848,7 @@ { "data": { "image/svg+xml": [ - "0102030400.60.650.70.750.80102030400.450.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" + "1020304000.511.522.53Covariance Matrix Adaptation Evolution Strategy (CMA-ES)IterationCost" ] }, "metadata": {}, @@ -681,7 +857,7 @@ { "data": { "image/svg+xml": [ - "1020304000.511.522.53Covariance Matrix Adaptation Evolution Strategy (CMA-ES)IterationCost" + "501001502002500.50.550.60.650.70.750.8501001502002500.450.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" ] }, "metadata": {}, @@ -690,7 +866,7 @@ { "data": { "image/svg+xml": [ - "0501001502002500.550.60.650.70.750.80501001502002500.450.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" + "10203040506000.511.522.5Seperable Natural Evolution Strategy (SNES)IterationCost" ] }, "metadata": {}, @@ -699,7 +875,7 @@ { "data": { "image/svg+xml": [ - "10203040506000.511.522.533.5Seperable Natural Evolution Strategy (SNES)IterationCost" + "1002003000.550.60.650.70.750.81002003000.450.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" ] }, "metadata": {}, @@ -708,7 +884,7 @@ { "data": { "image/svg+xml": [ - "01002003000.550.60.650.70.750.801002003000.450.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" + "10203040500.0030.00350.0040.00450.005Particle Swarm Optimisation (PSO)IterationCost" ] }, "metadata": {}, @@ -717,7 +893,7 @@ { "data": { "image/svg+xml": [ - "1020304000.511.522.5Particle Swarm Optimisation (PSO)IterationCost" + "501001502002500.50.550.60.650.70.750.8501001502002500.450.50.550.60.650.7Negative electrode active material volume fractionPositive electrode active material volume fractionParameter ConvergenceFunction CallFunction CallNegative electrode active material volume fractionPositive electrode active material volume fraction" ] }, "metadata": {}, @@ -726,7 +902,7 @@ { "data": { "image/svg+xml": [ - "0501001502000.50.550.60.650.70.750.80501001502000.40.450.50.550.60.650.7Negative electrode active material volume 
fraction"
[Updated notebook SVG outputs: refreshed cost-convergence and parameter-convergence plots for Exponential Natural Evolution Strategy (xNES), Nelder-Mead, Cuckoo Search, SciPyMinimize and SciPyDifferentialEvolution, plus refreshed 2D cost-landscape plots for AdamW, Gradient descent, iRprop-, Covariance Matrix Adaptation Evolution Strategy (CMA-ES), Separable Natural Evolution Strategy (SNES), Particle Swarm Optimisation (PSO), xNES, Nelder-Mead, Cuckoo Search, SciPyMinimize and SciPyDifferentialEvolution; axes are Iteration/Cost and the negative/positive electrode active material volume fractions.]
@@ -925,7 +1119,7 @@
    ],
    "source": [
     "# Plot the cost landscape with optimisation path and updated bounds\n",
-    "bounds = np.array([[0.5, 0.8], [0.55, 0.8]])\n",
+    "bounds = np.asarray([[0.5, 0.8], [0.55, 0.8]])\n",
     "for optim in optims:\n",
     "    pybop.plot2d(optim, bounds=bounds, steps=10, title=optim.name())"
    ]
diff --git a/examples/notebooks/optimiser_calibration.ipynb b/examples/notebooks/optimiser_calibration.ipynb
index 3199fadb..3b09cd37 100644
--- a/examples/notebooks/optimiser_calibration.ipynb
+++ b/examples/notebooks/optimiser_calibration.ipynb
@@ -126,9 +126,24 @@
    "outputs": [],
    "source": [
     "parameter_set = pybop.ParameterSet.pybamm(\"Chen2020\")\n",
+    "parameter_set.update(\n",
+    "    {\n",
+    "        \"Negative electrode active material volume fraction\": 0.65,\n",
+    "        \"Positive electrode active material volume fraction\": 0.51,\n",
+    "    }\n",
+    ")\n",
     "model = pybop.lithium_ion.SPM(parameter_set=parameter_set)\n",
-    "t_eval = np.arange(0, 900, 3)\n",
-    "values = model.predict(t_eval=t_eval)"
+    "init_soc = 0.4\n",
+    "experiment = pybop.Experiment(\n",
+    "    [\n",
+    "        (\n",
+    "            \"Discharge at 0.5C for 6 minutes (4 second period)\",\n",
+    "            \"Charge at 0.5C for 6 minutes (4 second period)\",\n",
+    "        ),\n",
+    "    ]\n",
+    "    * 2\n",
+    ")\n",
+    "values = model.predict(init_soc=init_soc, experiment=experiment)"
    ]
   },
   {
@@ -153,8 +168,10 @@
    },
    "outputs": [],
    "source": [
-    "sigma = 0.001\n",
-    "corrupt_values = values[\"Voltage [V]\"].data + np.random.normal(0, sigma, len(t_eval))"
+    "sigma = 0.002\n",
+    "corrupt_values = values[\"Voltage [V]\"].data + np.random.normal(\n",
+    "    0, sigma, len(values[\"Voltage [V]\"].data)\n",
+    ")"
    ]
   },
   {
@@ -200,7 +217,7 @@
    "source": [
     "dataset = pybop.Dataset(\n",
     "    {\n",
-    "        \"Time [s]\": t_eval,\n",
+    "        \"Time [s]\": values[\"Time [s]\"].data,\n",
     "        \"Current function [A]\": values[\"Current [A]\"].data,\n",
     "        \"Voltage [V]\": corrupt_values,\n",
     "    }\n",
     ")"
@@ -235,13 +252,15 @@
    "parameters = pybop.Parameters(\n",
    "    pybop.Parameter(\n",
    "        \"Negative electrode active material volume fraction\",\n",
-    "        prior=pybop.Gaussian(0.7, 0.025),\n",
-    "        bounds=[0.6, 0.9],\n",
+    "        prior=pybop.Uniform(0.45, 0.7),\n",
+    "        bounds=[0.4, 0.8],\n",
+    "        true_value=0.65,\n",
    "    ),\n",
    "    pybop.Parameter(\n",
    "        \"Positive electrode active material volume fraction\",\n",
-    "        prior=pybop.Gaussian(0.6, 0.025),\n",
-    "        bounds=[0.5, 0.8],\n",
+    "        prior=pybop.Uniform(0.45, 0.7),\n",
+    "        bounds=[0.4, 0.8],\n",
+    "        true_value=0.51,\n",
    "    ),\n",
    ")"
   ]
@@ -279,7 +298,7 @@
   }
   ],
   "source": [
-   "problem = pybop.FittingProblem(model, parameters, dataset)\n",
+   "problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc)\n",
    "cost = pybop.SumSquaredError(problem)\n",
    "optim = pybop.GradientDescent(cost, sigma0=0.2, max_iterations=100)"
   ]
@@ -307,26 +326,7 @@
    },
    "id": "-9OVt0EQ04qB"
   },
-  "outputs": [
-   {
-    "name": "stdout",
-    "output_type": "stream",
-    "text": [
-     "Error: Events ['Maximum voltage [V]'] are non-positive at initial conditions\n",
      [11 further identical removed lines]
-    ]
-   }
-  ],
+  "outputs": [],
   "source": [
    "x, final_cost = optim.run()"
   ]
@@ -362,7 +362,7 @@
   {
    "data": {
     "text/plain": [
-     "array([0.70742414, 0.58383355])"
+     "array([0.64609807, 0.51472958])"
     ]
    },
    "execution_count": 9,
@@ -396,7 +396,7 @@
 [SVG output updated: the "Optimised Comparison" Reference/Model voltage plot now spans roughly 0-1500 s and 3.5-3.7 V.]
@@ -404,7 +404,7 @@
   }
  ],
  "source": [
-  "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+  "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
  ]
@@ -434,26 +434,30 @@
   "text": [
    "0.001\n",
    "NOTE: Boundaries ignored by Gradient Descent\n",
-   "0.0045\n",
+   "0.012285714285714285\n",
+   "NOTE: Boundaries ignored by Gradient Descent\n",
+   "0.023571428571428573\n",
+   "NOTE: Boundaries ignored by Gradient Descent\n",
+   "0.03485714285714286\n",
    "NOTE: Boundaries ignored by Gradient Descent\n",
-   "0.008\n",
+   "0.046142857142857145\n",
    "NOTE: Boundaries ignored by Gradient Descent\n",
-   "0.0115\n",
+   "0.05742857142857143\n",
    "NOTE: Boundaries ignored by Gradient Descent\n",
-   "0.015\n",
+   "0.06871428571428571\n",
+   "NOTE: Boundaries ignored by Gradient Descent\n",
+   "0.08\n",
    "NOTE: Boundaries ignored by Gradient Descent\n"
   ]
  }
 ],
 "source": [
-  "sigmas = np.linspace(\n",
-  "    0.001, 0.015, 5\n",
-  ")  # Change this to a smaller range for a quicker run\n",
+  "sigmas = np.linspace(0.001, 0.08, 8)  # Change this to a smaller range for a quicker run\n",
   "xs = []\n",
   "optims = []\n",
   "for sigma in sigmas:\n",
   "    print(sigma)\n",
-  "    problem = pybop.FittingProblem(model, parameters, dataset)\n",
+  "    problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc)\n",
   "    cost = pybop.SumSquaredError(problem)\n",
   "    optim = pybop.GradientDescent(cost, sigma0=sigma, max_iterations=100)\n",
   "    x, final_cost = optim.run()\n",
@@ -477,11 +481,14 @@
   "name": "stdout",
   "output_type": "stream",
   "text": [
-   "| Sigma: 0.001 | Num Iterations: 100 | Best Cost: 0.0013289907848209911 | Results: [0.69535773 0.67509662] |\n",
-   "| Sigma: 0.0045 | Num Iterations: 100 | Best Cost: 0.0007218197918308683 | Results: [0.71892626 0.67060898] |\n",
-   "| Sigma: 0.008 | Num Iterations: 100 | Best Cost: 0.0006371022763628136 | Results: [0.72396797 0.6696914 ] |\n",
-   "| Sigma: 0.0115 | Num Iterations: 18 | Best Cost: 0.0004608694532019237 | Results: [0.74070995 0.6667419 ] |\n",
-   "| Sigma: 0.015 | Num Iterations: 100 | Best Cost: 0.0007468897676990436 | Results: [0.71758655 0.67085529] |\n"
+   "| Sigma: 0.001 | Num Iterations: 100 | Best Cost: 0.008590687346571011 | Results: [0.58273999 0.64430015] |\n",
+   "| Sigma: 0.012285714285714285 | Num Iterations: 100 | Best Cost: 0.0017482878947612424 | Results: [0.62229759 0.5406604 ] |\n",
+   "| Sigma: 0.023571428571428573 | Num Iterations: 100 | Best Cost: 0.0013871420979637958 | Results: [0.63941964 0.52140605] |\n",
+   "| Sigma: 0.03485714285714286 | Num Iterations: 100 | Best Cost: 0.001571369568098984 | Results: [0.62907481 0.53267599] |\n",
+   "| Sigma: 0.046142857142857145 | Num Iterations: 28 | Best Cost: 0.0013533853388748253 | Results: [0.64673791 0.51409832] |\n",
+   "| Sigma: 0.05742857142857143 | Num Iterations: 25 | Best Cost: 0.0013584031053821507 | Results: [0.64390064 0.51673076] |\n",
+   "| Sigma: 0.06871428571428571 | Num Iterations: 74 | Best Cost: 0.0013568172573032275 | Results: [0.64444354 0.51631924] |\n",
+   "| Sigma: 0.08 | Num Iterations: 73 | Best Cost: 0.0013551215844470215 | Results: [0.64505654 0.51551585] |\n"
   ]
  }
 ],
[Updated notebook SVG outputs: one cost-convergence plot and one parameter-convergence plot per sigma0 in the sweep (0.001 to 0.08), followed by one 2D cost-landscape plot per sigma0 over the updated bounds; axes as above.]
@@ -677,7 +765,7 @@
 ],
 "source": [
  "# Plot the cost landscape with optimisation path and updated bounds\n",
-  "bounds = np.array([[0.6, 0.9], [0.5, 0.8]])\n",
+  "bounds = np.array([[0.4, 0.8], [0.4, 0.8]])\n",
  "for optim, sigma in zip(optims, sigmas):\n",
  "    pybop.plot2d(optim, bounds=bounds, steps=10, title=f\"Sigma: {sigma}\")"
 ]
@@ -688,12 +776,12 @@
 "source": [
  "### Updating the Learning Rate\n",
  "\n",
-  "Let's take `sigma0 = 0.0115` as the best learning rate for this problem and look at the time-series trajectories."
+  "Let's take `sigma0 = 0.08` as the best learning rate for this problem and look at the time-series trajectories."
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 16,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-04-14T18:59:54.698068Z",
@@ -713,7 +801,7 @@
 [SVG output updated: the final "Optimised Comparison" Reference/Model voltage plot now spans roughly 0-1500 s and 3.5-3.7 V.]
@@ -721,9 +809,9 @@
  ],
  "source": [
-  "optim = pybop.GradientDescent(cost, sigma0=0.0115)\n",
+  "optim = pybop.Optimisation(cost, optimiser=pybop.GradientDescent, sigma0=0.08)\n",
   "x, final_cost = optim.run()\n",
-  "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+  "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
  ]
 },
 {
diff --git a/examples/notebooks/pouch_cell_identification.ipynb b/examples/notebooks/pouch_cell_identification.ipynb
index c24300ea..d952e22c 100644
--- a/examples/notebooks/pouch_cell_identification.ipynb
+++ b/examples/notebooks/pouch_cell_identification.ipynb
@@ -517,7 +517,7 @@
  "source": [
-  "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+  "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
  ]
 },
 {
@@ -1539,7 +1539,7 @@
  "source": [
-  "sol = problem.evaluate(x)\n",
+  "sol = problem.evaluate(parameters.as_dict(x))\n",
   "\n",
   "go.Figure(\n",
   "    [\n",
diff --git a/examples/notebooks/spm_AdamW.ipynb b/examples/notebooks/spm_AdamW.ipynb
index 20b73330..ec9a961a 100644
--- a/examples/notebooks/spm_AdamW.ipynb
+++ b/examples/notebooks/spm_AdamW.ipynb
@@ -437,7 +437,7 @@
  "source": [
-  "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+  "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
  ]
 },
 {
@@ -530,7 +530,7 @@
  "# Plot the cost landscape\n",
  "pybop.plot2d(cost, steps=15)\n",
  "# Plot the cost landscape with optimisation path and updated bounds\n",
-  "bounds = np.array([[0.6, 0.9], [0.5, 0.8]])\n",
+  "bounds = np.asarray([[0.6, 0.9], [0.5, 0.8]])\n",
  "pybop.plot2d(optim, bounds=bounds, steps=15);"
 ]
},
diff --git a/examples/notebooks/spm_electrode_design.ipynb b/examples/notebooks/spm_electrode_design.ipynb
index 950cee32..e1fd5820 100644
--- a/examples/notebooks/spm_electrode_design.ipynb
+++ b/examples/notebooks/spm_electrode_design.ipynb
@@ -277,7 +277,7 @@
  "source": [
   "x, final_cost = optim.run()\n",
   "print(\"Estimated parameters:\", x)\n",
-  "print(f\"Initial gravimetric energy density: {-cost(cost.x0):.2f} Wh.kg-1\")\n",
+  "print(f\"Initial gravimetric energy density: {-cost(optim.x0):.2f} Wh.kg-1\")\n",
   "print(f\"Optimised gravimetric energy density: {-final_cost:.2f} Wh.kg-1\")"
  ]
 },
@@ -329,7 +329,7 @@
  "source": [
   "if cost.update_capacity:\n",
   "    problem._model.approximate_capacity(x)\n",
-  "pybop.quick_plot(problem, parameter_values=x, title=\"Optimised Comparison\");"
+  "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");"
  ]
 },
 {
diff --git a/examples/scripts/BPX_spm.py b/examples/scripts/BPX_spm.py
index 6fdb7649..eea65884 100644
--- a/examples/scripts/BPX_spm.py
+++ b/examples/scripts/BPX_spm.py
@@ -51,7 +51,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
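The `parameters.as_dict(...)` conversion used in the pouch-cell change above also accepts the string shortcuts that appear later in this diff (cuckoo.py and design_costs.py). A sketch, assuming `parameters` is a `pybop.Parameters` container and `x` an optimiser result array:

    inputs = parameters.as_dict(x)                  # map an array of values to {name: value}
    true_inputs = parameters.as_dict("true")        # use each parameter's true_value
    initial_inputs = parameters.as_dict("initial")  # use each parameter's initial value
    sol = problem.evaluate(inputs)                  # problems now evaluate dict inputs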
diff --git a/examples/scripts/cuckoo.py b/examples/scripts/cuckoo.py
new file mode 100644
index 00000000..fcdfadc1
--- /dev/null
+++ b/examples/scripts/cuckoo.py
@@ -0,0 +1,75 @@
+import numpy as np
+
+import pybop
+
+# Define model
+parameter_set = pybop.ParameterSet.pybamm("Chen2020")
+model = pybop.lithium_ion.SPM(parameter_set=parameter_set)
+
+# Fitting parameters
+parameters = pybop.Parameters(
+    pybop.Parameter(
+        "Negative electrode active material volume fraction",
+        prior=pybop.Gaussian(0.6, 0.05),
+        bounds=[0.4, 0.75],
+        initial_value=0.41,
+        true_value=0.7,
+    ),
+    pybop.Parameter(
+        "Positive electrode active material volume fraction",
+        prior=pybop.Gaussian(0.48, 0.05),
+        bounds=[0.4, 0.75],
+        initial_value=0.41,
+        true_value=0.67,
+    ),
+)
+init_soc = 0.7
+experiment = pybop.Experiment(
+    [
+        (
+            "Discharge at 0.5C for 3 minutes (4 second period)",
+            "Charge at 0.5C for 3 minutes (4 second period)",
+        ),
+    ]
+)
+values = model.predict(
+    init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict("true")
+)
+
+sigma = 0.002
+corrupt_values = values["Voltage [V]"].data + np.random.normal(
+    0, sigma, len(values["Voltage [V]"].data)
+)
+
+# Form dataset
+dataset = pybop.Dataset(
+    {
+        "Time [s]": values["Time [s]"].data,
+        "Current function [A]": values["Current [A]"].data,
+        "Voltage [V]": corrupt_values,
+    }
+)
+
+# Generate problem, cost function, and optimisation class
+problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc)
+cost = pybop.GaussianLogLikelihood(problem, sigma0=sigma * 4)
+optim = pybop.Optimisation(
+    cost,
+    optimiser=pybop.CuckooSearch,
+    max_iterations=100,
+)
+
+x, final_cost = optim.run()
+print("Estimated parameters:", x)
+
+# Plot the timeseries output
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
+
+# Plot convergence
+pybop.plot_convergence(optim)
+
+# Plot the parameter traces
+pybop.plot_parameters(optim)
+
+# Plot the cost landscape with optimisation path
+pybop.plot2d(optim, steps=15)
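The new script drives `CuckooSearch` through `pybop.Optimisation`. Per the changelog's direct-construction API, the equivalent call should be (a sketch, reusing `cost` from cuckoo.py above):

    optim = pybop.CuckooSearch(cost, max_iterations=100)
    x, final_cost = optim.run()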
diff --git a/examples/scripts/ecm_CMAES.py b/examples/scripts/ecm_CMAES.py
index fc711cab..953d7e6a 100644
--- a/examples/scripts/ecm_CMAES.py
+++ b/examples/scripts/ecm_CMAES.py
@@ -89,7 +89,7 @@
 pybop.plot_dataset(dataset)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
@@ -101,5 +101,5 @@
 pybop.plot2d(cost, steps=15)
 
 # Plot the cost landscape with optimisation path and updated bounds
-bounds = np.array([[1e-4, 1e-2], [1e-5, 1e-2]])
+bounds = np.asarray([[1e-4, 1e-2], [1e-5, 1e-2]])
 pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py
index d469c781..622a68e5 100644
--- a/examples/scripts/exp_UKF.py
+++ b/examples/scripts/exp_UKF.py
@@ -1,11 +1,10 @@
 import numpy as np
-import pybamm
 
 import pybop
 from examples.standalone.model import ExponentialDecay
 
 # Parameter set and model definition
-parameter_set = pybamm.ParameterValues({"k": "[input]", "y0": "[input]"})
+parameter_set = {"k": "[input]", "y0": "[input]"}
 model = ExponentialDecay(parameter_set=parameter_set, n_states=1)
 
 # Fitting parameters
@@ -28,7 +27,8 @@
 sigma = 1e-2
 t_eval = np.linspace(0, 20, 10)
 model.parameters = parameters
-values = model.predict(t_eval=t_eval, inputs=parameters.true_value())
+true_inputs = parameters.as_dict("true")
+values = model.predict(t_eval=t_eval, inputs=true_inputs)
 values = values["2y"].data
 corrupt_values = values + np.random.normal(0, sigma, len(t_eval))
@@ -41,7 +41,7 @@
 model.build(parameters=parameters)
 simulator = pybop.Observer(parameters, model, signal=["2y"])
 simulator._time_data = t_eval
-measurements = simulator.evaluate(parameters.true_value())
+measurements = simulator.evaluate(true_inputs)
 
 # Verification step: Compare by plotting
 go = pybop.PlotlyManager().go
@@ -84,7 +84,7 @@
 )
 
 # Verification step: Find the maximum likelihood estimate given the true parameters
-estimation = observer.evaluate(parameters.true_value())
+estimation = observer.evaluate(true_inputs)
 
 # Verification step: Add the estimate to the plot
 line4 = go.Scatter(
@@ -102,7 +102,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output (requires model that returns Voltage)
-pybop.quick_plot(observer, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(observer, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/gitt.py b/examples/scripts/gitt.py
index 52517fdb..6d3b4a94 100644
--- a/examples/scripts/gitt.py
+++ b/examples/scripts/gitt.py
@@ -59,7 +59,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_AdamW.py b/examples/scripts/spm_AdamW.py
index 10351512..796849be 100644
--- a/examples/scripts/spm_AdamW.py
+++ b/examples/scripts/spm_AdamW.py
@@ -68,7 +68,7 @@ def noise(sigma):
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
@@ -77,5 +77,5 @@ def noise(sigma):
 pybop.plot_parameters(optim)
 
 # Plot the cost landscape with optimisation path
-bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
+bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]])
 pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py
index 1fac5d4f..7416f924 100644
--- a/examples/scripts/spm_CMAES.py
+++ b/examples/scripts/spm_CMAES.py
@@ -13,7 +13,7 @@
         prior=pybop.Gaussian(6e-06, 0.1e-6),
         bounds=[1e-6, 9e-6],
         true_value=parameter_set["Negative particle radius [m]"],
-        transformation=pybop.ScaledTransformation(coefficient=2.0),
+        transformation=pybop.LogTransformation(),
     ),
     pybop.Parameter(
         "Positive particle radius [m]",
@@ -55,7 +55,7 @@
 pybop.plot_dataset(dataset)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_IRPropMin.py b/examples/scripts/spm_IRPropMin.py
index 3b38668c..1969f6f9 100644
--- a/examples/scripts/spm_IRPropMin.py
+++ b/examples/scripts/spm_IRPropMin.py
@@ -42,7 +42,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
@@ -51,5 +51,5 @@
 pybop.plot_parameters(optim)
 
 # Plot the cost landscape with optimisation path
-bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
+bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]])
 pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_MAP.py b/examples/scripts/spm_MAP.py
index 191f93d8..f613eccb 100644
--- a/examples/scripts/spm_MAP.py
+++ b/examples/scripts/spm_MAP.py
@@ -2,8 +2,16 @@
 
 import pybop
 
-# Define model
+# Construct and update initial parameter values
 parameter_set = pybop.ParameterSet.pybamm("Chen2020")
+parameter_set.update(
+    {
+        "Negative electrode active material volume fraction": 0.63,
+        "Positive electrode active material volume fraction": 0.51,
+    }
+)
+
+# Define model
 model = pybop.lithium_ion.SPM(parameter_set=parameter_set)
 
 # Fitting parameters
@@ -12,21 +20,16 @@
         "Negative electrode active material volume fraction",
         prior=pybop.Gaussian(0.6, 0.05),
         bounds=[0.5, 0.8],
+        true_value=parameter_set["Negative electrode active material volume fraction"],
     ),
     pybop.Parameter(
         "Positive electrode active material volume fraction",
         prior=pybop.Gaussian(0.48, 0.05),
         bounds=[0.4, 0.7],
+        true_value=parameter_set["Positive electrode active material volume fraction"],
     ),
 )
 
-# Set initial parameter values
-parameter_set.update(
-    {
-        "Negative electrode active material volume fraction": 0.63,
-        "Positive electrode active material volume fraction": 0.51,
-    }
-)
 # Generate data
 sigma = 0.005
 t_eval = np.arange(0, 900, 3)
@@ -44,8 +47,8 @@
 
 # Generate problem, cost function, and optimisation class
 problem = pybop.FittingProblem(model, parameters, dataset)
-cost = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma)
-optim = pybop.CMAES(
+cost = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0=sigma)
+optim = pybop.AdamW(
     cost,
     max_unchanged_iterations=20,
     min_iterations=20,
@@ -54,10 +57,11 @@
 
 # Run the optimisation
 x, final_cost = optim.run()
+print("True parameters:", parameters.true_value())
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x[0:2], title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x[0:2], title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
@@ -69,5 +73,5 @@
 
 # Plot the cost landscape with optimisation path
-bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
+bounds = np.asarray([[0.55, 0.77], [0.48, 0.68]])
 pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py
index 7e1b3c93..9c9b3f36 100644
--- a/examples/scripts/spm_MLE.py
+++ b/examples/scripts/spm_MLE.py
@@ -16,7 +16,6 @@
     pybop.Parameter(
         "Positive electrode active material volume fraction",
         prior=pybop.Gaussian(0.48, 0.05),
-        bounds=[0.4, 0.7],
     ),
 )
 
@@ -44,8 +43,8 @@
 
 # Generate problem, cost function, and optimisation class
 problem = pybop.FittingProblem(model, parameters, dataset)
-likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=[0.03, 0.03])
-optim = pybop.CMAES(
+likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=sigma)
+optim = pybop.IRPropMin(
     likelihood,
     max_unchanged_iterations=20,
     min_iterations=20,
@@ -57,7 +56,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x[0:2], title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
@@ -69,5 +68,5 @@
 pybop.plot2d(likelihood, steps=15)
 
 # Plot the cost landscape with optimisation path
-bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
+bounds = np.asarray([[0.55, 0.77], [0.48, 0.68]])
 pybop.plot2d(optim, bounds=bounds, steps=15)
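The two scripts above differ mainly in the cost term. Side by side (a sketch, with `problem` and `sigma` as defined in the scripts):

    # Maximum likelihood: Gaussian log-likelihood with a known noise level
    likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=sigma)
    optim = pybop.IRPropMin(likelihood, max_unchanged_iterations=20, min_iterations=20)

    # Maximum a posteriori: the same likelihood plus the parameter priors
    cost = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0=sigma)
    optim = pybop.AdamW(cost, max_unchanged_iterations=20, min_iterations=20)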
diff --git a/examples/scripts/spm_NelderMead.py b/examples/scripts/spm_NelderMead.py
index 82639632..e07801e0 100644
--- a/examples/scripts/spm_NelderMead.py
+++ b/examples/scripts/spm_NelderMead.py
@@ -68,7 +68,7 @@ def noise(sigma):
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
@@ -77,5 +77,5 @@ def noise(sigma):
 pybop.plot_parameters(optim)
 
 # Plot the cost landscape with optimisation path
-bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
+bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]])
 pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_SNES.py b/examples/scripts/spm_SNES.py
index d2afcc85..93046d63 100644
--- a/examples/scripts/spm_SNES.py
+++ b/examples/scripts/spm_SNES.py
@@ -42,7 +42,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_UKF.py b/examples/scripts/spm_UKF.py
index e9972bd0..e528c715 100644
--- a/examples/scripts/spm_UKF.py
+++ b/examples/scripts/spm_UKF.py
@@ -68,7 +68,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output (requires model that returns Voltage)
-pybop.quick_plot(observer, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(observer, problem_inputs=x, title="Optimised Comparison")
 
 # # Plot convergence
 # pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_XNES.py b/examples/scripts/spm_XNES.py
index 59b6eca8..40900640 100644
--- a/examples/scripts/spm_XNES.py
+++ b/examples/scripts/spm_XNES.py
@@ -43,7 +43,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_descent.py b/examples/scripts/spm_descent.py
index df57a7ca..94573f0c 100644
--- a/examples/scripts/spm_descent.py
+++ b/examples/scripts/spm_descent.py
@@ -48,7 +48,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
@@ -57,5 +57,5 @@
 pybop.plot_parameters(optim)
 
 # Plot the cost landscape with optimisation path
-bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
+bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]])
 pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_pso.py b/examples/scripts/spm_pso.py
index acb3e1c6..efc97ad2 100644
--- a/examples/scripts/spm_pso.py
+++ b/examples/scripts/spm_pso.py
@@ -43,7 +43,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spm_scipymin.py b/examples/scripts/spm_scipymin.py
index 8c7b80c5..b6cec3f0 100644
--- a/examples/scripts/spm_scipymin.py
+++ b/examples/scripts/spm_scipymin.py
@@ -45,7 +45,7 @@
 print("Estimated parameters:", x)
 
 # Plot the timeseries output
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot convergence
 pybop.plot_convergence(optim)
diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py
index 800a535c..f5b7c827 100644
--- a/examples/scripts/spme_max_energy.py
+++ b/examples/scripts/spme_max_energy.py
@@ -12,11 +12,11 @@
 # NOTE: This script can be easily adjusted to consider the volumetric
 # (instead of gravimetric) energy density by changing the line which
 # defines the cost and changing the output to:
-# print(f"Initial volumetric energy density: {cost(cost.x0):.2f} Wh.m-3")
+# print(f"Initial volumetric energy density: {cost(optim.x0):.2f} Wh.m-3")
 # print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3")
 
 # Define parameter set and model
-parameter_set = pybop.ParameterSet.pybamm("Chen2020")
+parameter_set = pybop.ParameterSet.pybamm("Chen2020", formation_concentrations=True)
 model = pybop.lithium_ion.SPMe(parameter_set=parameter_set)
 
 # Fitting parameters
@@ -54,13 +54,13 @@
 # Run optimisation
 x, final_cost = optim.run()
 print("Estimated parameters:", x)
-print(f"Initial gravimetric energy density: {cost(cost.x0):.2f} Wh.kg-1")
+print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1")
 print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1")
 
 # Plot the timeseries output
 if cost.update_capacity:
     problem._model.approximate_capacity(x)
-pybop.quick_plot(problem, parameter_values=x, title="Optimised Comparison")
+pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison")
 
 # Plot the cost landscape with optimisation path
 if len(x) == 2:
diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py
index 6816c39d..bf5d600f 100644
--- a/examples/standalone/cost.py
+++ b/examples/standalone/cost.py
@@ -43,7 +43,7 @@
         )
         self.x0 = self.parameters.initial_value()
 
-    def _evaluate(self, x):
+    def _evaluate(self, inputs):
         """
         Calculate the cost for a given parameter value.
@@ -52,9 +52,8 @@
         Parameters
         ----------
-        x : array-like
-            A one-element array containing the parameter value for which to
-            evaluate the cost.
+        inputs : Dict
+            The parameters for which to evaluate the cost.
         grad : array-like, optional
             Unused parameter, present for compatibility with gradient-based
             optimizers.
@@ -65,4 +64,4 @@
             The calculated cost value for the given parameter.
         """
 
-        return x[0] ** 2 + 42
+        return inputs["x"] ** 2 + 42
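With the dict-based signature above, the standalone cost is exercised like any other pybop cost. A sketch, assuming the class defined in examples/standalone/cost.py is importable as `StandaloneCost` and that, per the base-cost change later in this diff, `parameters.verify` maps arrays to named inputs:

    cost = StandaloneCost()
    print(cost.evaluate([2.0]))  # verify() maps the array to {"x": 2.0}; 2.0**2 + 42 = 46.0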
""" - return x[0] ** 2 + 42 + return inputs["x"] ** 2 + 42 diff --git a/examples/standalone/model.py b/examples/standalone/model.py index e6747746..f5d5a7ab 100644 --- a/examples/standalone/model.py +++ b/examples/standalone/model.py @@ -18,7 +18,8 @@ def __init__( parameter_set: pybamm.ParameterValues = None, n_states: int = 1, ): - super().__init__() + super().__init__(name=name, parameter_set=parameter_set) + self.n_states = n_states if n_states < 1: raise ValueError("The number of states (n_states) must be greater than 0") @@ -38,10 +39,11 @@ def __init__( ) self._unprocessed_model = self.pybamm_model - self.name = name self.default_parameter_values = ( - default_parameter_values if parameter_set is None else parameter_set + default_parameter_values + if self._parameter_set is None + else self._parameter_set ) self._parameter_set = self.default_parameter_values self._unprocessed_parameter_set = self._parameter_set diff --git a/examples/standalone/problem.py b/examples/standalone/problem.py index d6d1f4b0..d76f9dca 100644 --- a/examples/standalone/problem.py +++ b/examples/standalone/problem.py @@ -42,31 +42,34 @@ def __init__( ) self._target = {signal: self._dataset[signal] for signal in self.signal} - def evaluate(self, x): + def evaluate(self, inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Dict + Parameters for evaluation of the model. Returns ------- y : np.ndarray - The model output y(t) simulated with inputs x. + The model output y(t) simulated with given inputs. """ - return {signal: x[0] * self._time_data + x[1] for signal in self.signal} + return { + signal: inputs["Gradient"] * self._time_data + inputs["Intercept"] + for signal in self.signal + } - def evaluateS1(self, x): + def evaluateS1(self, inputs): """ Evaluate the model with the given parameters and return the signal and its derivatives. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Dict + Parameters for evaluation of the model. Returns ------- @@ -75,7 +78,7 @@ def evaluateS1(self, x): with given inputs x. 
""" - y = {signal: x[0] * self._time_data + x[1] for signal in self.signal} + y = self.evaluate(inputs) dy = np.zeros((self.n_time_data, self.n_outputs, self.n_parameters)) dy[:, 0, 0] = self._time_data diff --git a/pybop/__init__.py b/pybop/__init__.py index 5755a4b3..f3a7bb57 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -97,7 +97,6 @@ RootMeanSquaredError, SumSquaredError, ObserverCost, - MAP, ) from .costs.design_costs import ( DesignCost, @@ -108,12 +107,14 @@ BaseLikelihood, GaussianLogLikelihood, GaussianLogLikelihoodKnownSigma, + MAP, ) # # Optimiser class # +from .optimisers._cuckoo import CuckooSearchImpl from .optimisers._adamw import AdamWImpl from .optimisers.base_optimiser import BaseOptimiser, Result from .optimisers.base_pints_optimiser import BasePintsOptimiser @@ -131,6 +132,7 @@ PSO, SNES, XNES, + CuckooSearch, AdamW, ) from .optimisers.optimisation import Optimisation diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 7aa027e2..ff15a79a 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -1,7 +1,11 @@ +from typing import List, Tuple, Union + import numpy as np -from pybop import IdentityTransformation from pybop.costs.base_cost import BaseCost +from pybop.parameters.parameter import Inputs, Parameter, Parameters +from pybop.parameters.priors import Uniform +from pybop.problems.base_problem import BaseProblem class BaseLikelihood(BaseCost): @@ -9,7 +13,7 @@ class BaseLikelihood(BaseCost): Base class for likelihoods """ - def __init__(self, problem): + def __init__(self, problem: BaseProblem): super(BaseLikelihood, self).__init__(problem) self.n_time_data = problem.n_time_data @@ -22,92 +26,74 @@ class GaussianLogLikelihoodKnownSigma(BaseLikelihood): Parameters ---------- - sigma : scalar or array + sigma0 : scalar or array Initial standard deviation around ``x0``. Either a scalar value (one standard deviation for all coordinates) or an array with one entry - per dimension. Not all methods will use this information. + per dimension. """ - def __init__(self, problem, sigma): + def __init__(self, problem: BaseProblem, sigma0: Union[List[float], float]): super(GaussianLogLikelihoodKnownSigma, self).__init__(problem) - self.sigma = None - self.set_sigma(sigma) - self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi / self.sigma) - self._multip = -1 / (2.0 * self.sigma**2) - self.sigma2 = self.sigma**-2 + sigma0 = self.check_sigma0(sigma0) + self.sigma2 = sigma0**2.0 + self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi * self.sigma2) + self._multip = -1 / (2.0 * self.sigma2) self._dl = np.ones(self.n_parameters) - def set_sigma(self, sigma): + def _evaluate(self, inputs: Inputs) -> float: """ - Setter for sigma parameter + Evaluates the Gaussian log-likelihood for the given parameters with known sigma. """ - if sigma is None: - raise ValueError( - "The GaussianLogLikelihoodKnownSigma cost requires sigma to be " - + "either a scalar value or an array with one entry per dimension." 
-            )
-
-        if not isinstance(sigma, np.ndarray):
-            sigma = np.array(sigma)
-
-        if not np.issubdtype(sigma.dtype, np.number):
-            raise ValueError("Sigma must contain only numeric values")
+        y = self.problem.evaluate(inputs)
+        if any(
+            len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal
+        ):
+            return -np.inf  # prediction length doesn't match target
 
-        if np.any(sigma <= 0):
-            raise ValueError("Sigma must be positive")
-        else:
-            self.sigma = sigma
-
-    def get_sigma(self):
-        """
-        Getter for sigma parameter
-        """
-        return self.sigma
-
-    def _evaluate(self, x):
-        """
-        Calls the problem.evaluate method and calculates
-        the log-likelihood
-        """
-        y = self.problem.evaluate(x)
-
-        for key in self.signal:
-            if len(y.get(key, [])) != len(self._target.get(key, [])):
-                return -np.float64(np.inf)  # prediction doesn't match target
-
-        e = np.array(
+        e = np.sum(
             [
                 np.sum(
                     self._offset
-                    + self._multip * np.sum((self._target[signal] - y[signal]) ** 2)
+                    + self._multip * np.sum((self._target[signal] - y[signal]) ** 2.0)
                 )
                 for signal in self.signal
             ]
         )
 
-        if self.n_outputs == 1:
-            return e.item()
-        else:
-            return np.sum(e)
+        return e if self.n_outputs != 1 else e.item()
 
-    def _evaluateS1(self, x):
+    def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]:
         """
-        Calls the problem.evaluateS1 method and calculates
-        the log-likelihood
+        Calls the problem.evaluateS1 method and calculates the log-likelihood and gradient.
         """
-        y, dy = self.problem.evaluateS1(x)
+        y, dy = self.problem.evaluateS1(inputs)
+
+        if any(
+            len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal
+        ):
+            return -np.inf, -self._dl
 
-        for key in self.signal:
-            if len(y.get(key, [])) != len(self._target.get(key, [])):
-                likelihood = np.float64(np.inf)
-                dl = self._dl * np.ones(self.n_parameters)
-                return -likelihood, -dl
+        likelihood = self._evaluate(inputs)
+
+        r = np.asarray([self._target[signal] - y[signal] for signal in self.signal])
+        dl = np.sum((np.sum((r * dy.T), axis=2) / self.sigma2), axis=1)
 
-        r = np.array([self._target[signal] - y[signal] for signal in self.signal])
-        likelihood = self._evaluate(x)
-        dl = np.sum((self.sigma2 * np.sum((r * dy.T), axis=2)), axis=1)
         return likelihood, dl
 
+    def check_sigma0(self, sigma0: Union[np.ndarray, float]):
+        """
+        Check the validity of sigma0.
+        """
+        sigma0 = np.asarray(sigma0, dtype=float)
+        if not np.all(sigma0 > 0):
+            raise ValueError("Sigma0 must be positive")
+        if np.shape(sigma0) not in [(), (1,), (self.n_outputs,)]:
+            raise ValueError(
+                "sigma0 must be either a scalar value (one standard deviation for "
+                + "all coordinates) or an array with one entry per dimension."
+            )
+        return sigma0
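After this refactor, sigma0 is validated once by `check_sigma0` and folded into cached constants at construction time. Usage then looks like (a sketch, assuming a built `FittingProblem` and a parameter array `x`):

    likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=0.002)
    ll = likelihood.evaluate(x)        # scalar log-likelihood
    ll, dl = likelihood.evaluateS1(x)  # log-likelihood and its gradient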
""" - def __init__(self, problem): + def __init__( + self, + problem: BaseProblem, + sigma0: Union[float, List[float], List[Parameter]] = 0.002, + dsigma_scale: float = 1.0, + ): super(GaussianLogLikelihood, self).__init__(problem) + self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) - self._dl = np.ones(self.n_parameters + self.n_outputs) - if self.transformation: - self.transformation.append( - IdentityTransformation() - ) # Temporary fix, ahead of #338 - def _evaluate(self, x): + self.sigma = Parameters() + self._add_sigma_parameters(sigma0) + self.parameters.join(self.sigma) + self._dl = np.ones(self.n_parameters) + + def _add_sigma_parameters(self, sigma0): + sigma0 = [sigma0] if not isinstance(sigma0, List) else sigma0 + sigma0 = self._pad_sigma0(sigma0) + + for i, value in enumerate(sigma0): + self._add_single_sigma(i, value) + + def _pad_sigma0(self, sigma0): + if len(sigma0) < self.n_outputs: + return np.pad( + sigma0, + (0, self.n_outputs - len(sigma0)), + constant_values=sigma0[-1], + ) + return sigma0 + + def _add_single_sigma(self, index, value): + if isinstance(value, Parameter): + self.sigma.add(value) + elif isinstance(value, (int, float)): + self.sigma.add( + Parameter( + f"Sigma for output {index+1}", + initial_value=value, + prior=Uniform(0.5 * value, 1.5 * value), + ) + ) + else: + raise TypeError( + f"Expected sigma0 to contain Parameter objects or numeric values. " + f"Received {type(value)}" + ) + + @property + def dsigma_scale(self): + """ + Scaling factor for the dsigma term in the gradient calculation. + """ + return self._dsigma_scale + + @dsigma_scale.setter + def dsigma_scale(self, new_value): + if new_value < 0: + raise ValueError("dsigma_scale must be non-negative") + self._dsigma_scale = new_value + + def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float: """ Evaluates the Gaussian log-likelihood for the given parameters. - Args: - x (array_like): The parameters for which to evaluate the log-likelihood. - The last `self.n_outputs` elements are assumed to be the - standard deviations of the Gaussian distributions. + Parameters + ---------- + inputs : Inputs + The parameters for which to evaluate the log-likelihood, including the `n_outputs` + standard deviations of the Gaussian distributions. - Returns: - float: The log-likelihood value, or -inf if the standard deviations are received as non-positive. + Returns + ------- + float + The log-likelihood value, or -inf if the standard deviations are non-positive. 
""" - sigma = np.asarray(x[-self.n_outputs :]) + self.parameters.update(values=list(inputs.values())) + sigma = self.sigma.current_value() if np.any(sigma <= 0): return -np.inf - y = self.problem.evaluate(x[: -self.n_outputs]) - - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): - return -np.float64(np.inf) # prediction doesn't match target + y = self.problem.evaluate(self.problem.parameters.as_dict()) + if any( + len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + ): + return -np.inf # prediction length doesn't match target - e = np.array( + e = np.sum( [ np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum((self._target[signal] - y[signal]) ** 2) / (2.0 * sigma**2) + - np.sum((self._target[signal] - y[signal]) ** 2.0) + / (2.0 * sigma**2.0) ) for signal in self.signal ] ) - if self.n_outputs == 1: - return e.item() - else: - return np.sum(e) + return e if self.n_outputs != 1 else e.item() - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: """ - Calls the problem.evaluateS1 method and calculates - the log-likelihood + Calls the problem.evaluateS1 method and calculates the log-likelihood. + + Parameters + ---------- + inputs : Inputs + The parameters for which to evaluate the log-likelihood. + + Returns + ------- + Tuple[float, np.ndarray] + The log-likelihood and its gradient. """ - sigma = np.asarray(x[-self.n_outputs :]) + self.parameters.update(values=list(inputs.values())) + sigma = self.sigma.current_value() if np.any(sigma <= 0): - return -np.float64(np.inf), -self._dl * np.ones(self.n_parameters) - - y, dy = self.problem.evaluateS1(x[: -self.n_outputs]) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): - likelihood = np.float64(np.inf) - dl = self._dl * np.ones(self.n_parameters) - return -likelihood, -dl - - r = np.array([self._target[signal] - y[signal] for signal in self.signal]) - likelihood = self._evaluate(x) - dl = sigma ** (-2.0) * np.sum((r * dy.T), axis=2) - dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=1) + return -np.inf, -self._dl + + y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) + if any( + len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + ): + return -np.inf, -self._dl + + likelihood = self._evaluate(inputs) + + r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) + dl = np.sum((np.sum((r * dy.T), axis=2) / (sigma**2.0)), axis=1) + dsigma = ( + -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0) + ) / self._dsigma_scale dl = np.concatenate((dl.flatten(), dsigma)) + return likelihood, dl + + +class MAP(BaseLikelihood): + """ + Maximum a posteriori cost function. + + Computes the maximum a posteriori cost function, which is the sum of the + log likelihood and the log prior. The goal of maximising is achieved by + setting minimising = False in the optimiser settings. + + Inherits all parameters and attributes from ``BaseLikelihood``. 
+
+    """
+
+    def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-3):
+        super(MAP, self).__init__(problem)
+        self.sigma0 = sigma0
+        self.gradient_step = gradient_step
+        if self.sigma0 is None:
+            self.sigma0 = []
+            for param in self.problem.parameters:
+                self.sigma0.append(param.prior.sigma)
+
+        try:
+            self.likelihood = likelihood(problem=self.problem, sigma0=self.sigma0)
+        except Exception as e:
+            raise ValueError(
+                f"An error occurred when constructing the Likelihood class: {e}"
+            )
+
+        if not isinstance(self.likelihood, BaseLikelihood):
+            raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood")
+
+    def _evaluate(self, inputs: Inputs) -> float:
+        """
+        Calculate the maximum a posteriori cost for a given set of parameters.
+
+        Parameters
+        ----------
+        inputs : Inputs
+            The parameters for which to evaluate the cost.
+
+        Returns
+        -------
+        float
+            The maximum a posteriori cost.
+        """
+        log_likelihood = self.likelihood._evaluate(inputs)
+        log_prior = sum(
+            self.parameters[key].prior.logpdf(value) for key, value in inputs.items()
+        )
+
+        posterior = log_likelihood + log_prior
+        return posterior
+
+    def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]:
+        """
+        Compute the maximum a posteriori cost and its gradient with respect to the parameters.
+        The method passes the likelihood gradient to the optimiser without modification.
+
+        Parameters
+        ----------
+        inputs : Inputs
+            The parameters for which to compute the cost and gradient.
+
+        Returns
+        -------
+        tuple
+            A tuple containing the cost and the gradient. The cost is a float,
+            and the gradient is an array-like of the same length as the parameters.
+
+        Raises
+        ------
+        ValueError
+            If an error occurs during the calculation of the cost or gradient.
+        """
+        log_likelihood, dl = self.likelihood._evaluateS1(inputs)
+        log_prior = sum(
+            self.parameters[key].prior.logpdf(value) for key, value in inputs.items()
+        )
+
+        # Compute a central finite difference approximation of the gradient of the log prior
+        delta = self.parameters.initial_value() * self.gradient_step
+        prior_gradient = []
+
+        for parameter, step_size in zip(self.problem.parameters, delta):
+            param_value = inputs[parameter.name]
+
+            log_prior_upper = parameter.prior.logpdf(param_value * (1 + step_size))
+            log_prior_lower = parameter.prior.logpdf(param_value * (1 - step_size))
+
+            gradient = (log_prior_upper - log_prior_lower) / (
+                2 * step_size * param_value + np.finfo(float).eps
+            )
+            prior_gradient.append(gradient)
+
+        posterior = log_likelihood + log_prior
+        total_gradient = dl + prior_gradient
+
+        return posterior, total_gradient
diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index 525ab9f5..132ca6c2 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -1,4 +1,5 @@
 from pybop import BaseProblem, ComposedTransformation, IdentityTransformation
+from pybop.parameters.parameter import Inputs, Parameters
 
 
 class BaseCost:
@@ -17,21 +18,17 @@ class BaseCost:
         evaluating the cost function.
     _target : array-like
         An array containing the target data to fit.
-    x0 : array-like
-        The initial guess for the model parameters.
     n_outputs : int
         The number of outputs in the model.
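+
+    A minimal call sketch (values illustrative): ``cost([0.6, 0.5])`` converts
+    the list into an ``Inputs`` dictionary via ``Parameters.verify`` before
+    dispatching to ``_evaluate``.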
""" def __init__(self, problem=None): - self.parameters = None + self.parameters = Parameters() self.transformation = None self.problem = problem - self.x0 = None if isinstance(self.problem, BaseProblem): self._target = self.problem._target - self.parameters = self.problem.parameters - self.x0 = self.problem.x0 + self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal self.transformation = self.construct_transformation() @@ -78,8 +75,10 @@ def evaluate(self, x): ValueError If an error occurs during the calculation of the cost. """ + inputs = self.parameters.verify(x) + try: - return self._evaluate(x) + return self._evaluate(inputs) except NotImplementedError as e: raise e @@ -87,7 +86,7 @@ def evaluate(self, x): except Exception as e: raise ValueError(f"Error in cost calculation: {e}") - def _evaluate(self, x): + def _evaluate(self, inputs: Inputs): """ Calculate the cost function value for a given set of parameters. @@ -95,7 +94,7 @@ def _evaluate(self, x): Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. Returns @@ -130,12 +129,14 @@ def evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ + inputs = self.parameters.verify(x) + try: if self.transformation: - p = self.transformation.to_model(x) + p = self.transformation.to_model(inputs) return self._evaluateS1(p) else: - return self._evaluateS1(x) + return self._evaluateS1(inputs) except NotImplementedError as e: raise e @@ -143,13 +144,13 @@ def evaluateS1(self, x): except Exception as e: raise ValueError(f"Error in cost calculation: {e}") - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 16bf4b5f..64f44572 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -2,8 +2,8 @@ import numpy as np -from pybop import is_numeric from pybop.costs.base_cost import BaseCost +from pybop.parameters.parameter import Inputs class DesignCost(BaseCost): @@ -44,20 +44,20 @@ def __init__(self, problem, update_capacity=False): warnings.warn(nominal_capacity_warning, UserWarning) self.update_capacity = update_capacity self.parameter_set = problem.model.parameter_set - self.update_simulation_data(self.x0) + self.update_simulation_data(self.parameters.as_dict("initial")) - def update_simulation_data(self, x0): + def update_simulation_data(self, inputs: Inputs): """ Updates the simulation data based on the initial parameter values. Parameters ---------- - x0 : array + inputs : Inputs The initial parameter values for the simulation. """ if self.update_capacity: - self.problem.model.approximate_capacity(x0) - solution = self.problem.evaluate(x0) + self.problem.model.approximate_capacity(inputs) + solution = self.problem.evaluate(inputs) if "Time [s]" not in solution: raise ValueError("The solution does not contain time data.") @@ -65,7 +65,7 @@ def update_simulation_data(self, x0): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def _evaluate(self, x): + def _evaluate(self, inputs: Inputs): """ Computes the value of the cost function. 
@@ -73,8 +73,8 @@ def _evaluate(self, x): Parameters ---------- - x : array - The parameter set for which to compute the cost. + inputs : Inputs + The parameters for which to compute the cost. Raises ------ @@ -97,31 +97,28 @@ class GravimetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(GravimetricEnergyDensity, self).__init__(problem, update_capacity) - def _evaluate(self, x): + def _evaluate(self, inputs: Inputs): """ Computes the cost function for the energy density. Parameters ---------- - x : array - The parameter set for which to compute the cost. + inputs : Inputs + The parameters for which to compute the cost. Returns ------- float The gravimetric energy density or -infinity in case of infeasible parameters. """ - if not all(is_numeric(i) for i in x): - raise ValueError("Input must be a numeric array.") - try: with warnings.catch_warnings(): # Convert UserWarning to an exception warnings.filterwarnings("error", category=UserWarning) if self.update_capacity: - self.problem.model.approximate_capacity(x) - solution = self.problem.evaluate(x) + self.problem.model.approximate_capacity(inputs) + solution = self.problem.evaluate(inputs) voltage, current = solution["Voltage [V]"], solution["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( @@ -154,30 +151,28 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super(VolumetricEnergyDensity, self).__init__(problem, update_capacity) - def _evaluate(self, x): + def _evaluate(self, inputs: Inputs): """ Computes the cost function for the energy density. Parameters ---------- - x : array - The parameter set for which to compute the cost. + inputs : Inputs + The parameters for which to compute the cost. Returns ------- float The volumetric energy density or -infinity in case of infeasible parameters. """ - if not all(is_numeric(i) for i in x): - raise ValueError("Input must be a numeric array.") try: with warnings.catch_warnings(): # Convert UserWarning to an exception warnings.filterwarnings("error", category=UserWarning) if self.update_capacity: - self.problem.model.approximate_capacity(x) - solution = self.problem.evaluate(x) + self.problem.model.approximate_capacity(inputs) + solution = self.problem.evaluate(inputs) voltage, current = solution["Voltage [V]"], solution["Current [A]"] energy_density = np.trapz(voltage * current, dx=self.dt) / ( diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 57487f39..6a3c8153 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -1,8 +1,8 @@ import numpy as np -from pybop.costs._likelihoods import BaseLikelihood from pybop.costs.base_cost import BaseCost from pybop.observers.observer import Observer +from pybop.parameters.parameter import Inputs class RootMeanSquaredError(BaseCost): @@ -23,13 +23,13 @@ def __init__(self, problem): # Default fail gradient self._de = 1.0 - def _evaluate(self, x): + def _evaluate(self, inputs: Inputs): """ Calculate the root mean square error for a given set of parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. grad : array-like, optional An array to store the gradient of the cost function with respect @@ -41,13 +41,13 @@ def _evaluate(self, x): The root mean square error. 
""" - prediction = self.problem.evaluate(x) + prediction = self.problem.evaluate(inputs) for key in self.signal: if len(prediction.get(key, [])) != len(self._target.get(key, [])): return np.float64(np.inf) # prediction doesn't match target - e = np.array( + e = np.asarray( [ np.sqrt(np.mean((prediction[signal] - self._target[signal]) ** 2)) for signal in self.signal @@ -59,13 +59,13 @@ def _evaluate(self, x): else: return np.sum(e) - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns @@ -79,7 +79,7 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(x) + y, dy = self.problem.evaluateS1(inputs) for key in self.signal: if len(y.get(key, [])) != len(self._target.get(key, [])): @@ -87,7 +87,7 @@ def _evaluateS1(self, x): de = self._de * np.ones(self.n_parameters) return e, de - r = np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) e = np.sqrt(np.mean(r**2, axis=1)) de = np.mean((r * dy.T), axis=2) / (e + np.finfo(float).eps) @@ -136,13 +136,13 @@ def __init__(self, problem): # Default fail gradient self._de = 1.0 - def _evaluate(self, x): + def _evaluate(self, inputs: Inputs): """ Calculate the sum of squared errors for a given set of parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. Returns @@ -150,13 +150,13 @@ def _evaluate(self, x): float The sum of squared errors. """ - prediction = self.problem.evaluate(x) + prediction = self.problem.evaluate(inputs) for key in self.signal: if len(prediction.get(key, [])) != len(self._target.get(key, [])): return np.float64(np.inf) # prediction doesn't match target - e = np.array( + e = np.asarray( [ np.sum(((prediction[signal] - self._target[signal]) ** 2)) for signal in self.signal @@ -167,13 +167,13 @@ def _evaluate(self, x): else: return np.sum(e) - def _evaluateS1(self, x): + def _evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns @@ -187,14 +187,14 @@ def _evaluateS1(self, x): ValueError If an error occurs during the calculation of the cost or gradient. """ - y, dy = self.problem.evaluateS1(x) + y, dy = self.problem.evaluateS1(inputs) for key in self.signal: if len(y.get(key, [])) != len(self._target.get(key, [])): e = np.float64(np.inf) de = self._de * np.ones(self.n_parameters) return e, de - r = np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) e = np.sum(np.sum(r**2, axis=0), axis=0) de = 2 * np.sum(np.sum((r * dy.T), axis=2), axis=1) @@ -231,13 +231,13 @@ def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - def _evaluate(self, x): + def _evaluate(self, inputs: Inputs): """ Calculate the observer cost for a given set of parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to evaluate the cost. 
grad : array-like, optional An array to store the gradient of the cost function with respect @@ -248,19 +248,18 @@ def _evaluate(self, x): float The observer cost (negative of the log likelihood). """ - inputs = self._observer.parameters.as_dict(x) log_likelihood = self._observer.log_likelihood( self._target, self._observer.time_data(), inputs ) return -log_likelihood - def evaluateS1(self, x): + def evaluateS1(self, inputs: Inputs): """ Compute the cost and its gradient with respect to the parameters. Parameters ---------- - x : array-like + inputs : Inputs The parameters for which to compute the cost and gradient. Returns @@ -275,90 +274,3 @@ def evaluateS1(self, x): If an error occurs during the calculation of the cost or gradient. """ raise NotImplementedError - - -class MAP(BaseLikelihood): - """ - Maximum a posteriori cost function. - - Computes the maximum a posteriori cost function, which is the sum of the - log likelihood and the log prior. The goal of maximising is achieved by - setting minimising = False in the optimiser settings. - - Inherits all parameters and attributes from ``BaseLikelihood``. - - """ - - def __init__(self, problem, likelihood, sigma=None): - super(MAP, self).__init__(problem) - self.sigma0 = sigma - if self.sigma0 is None: - self.sigma0 = [] - for param in self.problem.parameters: - self.sigma0.append(param.prior.sigma) - - try: - self.likelihood = likelihood(problem=self.problem, sigma=self.sigma0) - except Exception as e: - raise ValueError( - f"An error occurred when constructing the Likelihood class: {e}" - ) - - if hasattr(self, "likelihood") and not isinstance( - self.likelihood, BaseLikelihood - ): - raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") - - def _evaluate(self, x): - """ - Calculate the maximum a posteriori cost for a given set of parameters. - - Parameters - ---------- - x : array-like - The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. - - Returns - ------- - float - The maximum a posteriori cost. - """ - log_likelihood = self.likelihood.evaluate(x) - log_prior = sum( - param.prior.logpdf(x_i) for x_i, param in zip(x, self.problem.parameters) - ) - - posterior = log_likelihood + log_prior - return posterior - - def _evaluateS1(self, x): - """ - Compute the maximum a posteriori with respect to the parameters. - The method passes the likelihood gradient to the optimiser without modification. - - Parameters - ---------- - x : array-like - The parameters for which to compute the cost and gradient. - - Returns - ------- - tuple - A tuple containing the cost and the gradient. The cost is a float, - and the gradient is an array-like of the same length as `x`. - - Raises - ------ - ValueError - If an error occurs during the calculation of the cost or gradient. 
- """ - log_likelihood, dl = self.likelihood.evaluateS1(x) - log_prior = sum( - param.prior.logpdf(x_i) for x_i, param in zip(x, self.problem.parameters) - ) - - posterior = log_likelihood + log_prior - return posterior, dl diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py index e9809bc4..e9ba3d6c 100644 --- a/pybop/models/base_model.py +++ b/pybop/models/base_model.py @@ -7,8 +7,7 @@ import pybamm from pybop import Dataset, Experiment, Parameters, ParameterSet - -Inputs = Dict[str, float] +from pybop.parameters.parameter import Inputs @dataclass @@ -65,7 +64,7 @@ def __init__(self, name="Base Model", parameter_set=None): else: # a pybop parameter set self._parameter_set = pybamm.ParameterValues(parameter_set.params) - self.parameters = None + self.parameters = Parameters() self.dataset = None self.signal = None self.additional_variables = [] @@ -74,10 +73,6 @@ def __init__(self, name="Base Model", parameter_set=None): self.param_check_counter = 0 self.allow_infeasible_solutions = True - @property - def n_parameters(self): - return len(self.parameters) - def build( self, dataset: Dataset = None, @@ -104,8 +99,8 @@ def build( The initial state of charge to be used in simulations. """ self.dataset = dataset - self.parameters = parameters - if self.parameters is not None: + if parameters is not None: + self.parameters = parameters self.classify_and_update_parameters(self.parameters) if init_soc is not None: @@ -223,8 +218,8 @@ def rebuild( The initial state of charge to be used in simulations. """ self.dataset = dataset + if parameters is not None: - self.parameters = parameters self.classify_and_update_parameters(parameters) if init_soc is not None: @@ -243,7 +238,7 @@ def rebuild( # Clear solver and setup model self._solver._model_set_up = {} - def classify_and_update_parameters(self, parameters: Union[Parameters, Dict]): + def classify_and_update_parameters(self, parameters: Parameters): """ Update the parameter values according to their classification as either 'rebuild_parameters' which require a model rebuild and @@ -251,10 +246,16 @@ def classify_and_update_parameters(self, parameters: Union[Parameters, Dict]): Parameters ---------- - parameters : pybop.ParameterSet + parameters : pybop.Parameters """ - parameter_dictionary = parameters.as_dict() + if parameters is None: + self.parameters = Parameters() + else: + self.parameters = parameters + + parameter_dictionary = self.parameters.as_dict() + rebuild_parameters = { param: parameter_dictionary[param] for param in parameter_dictionary @@ -275,6 +276,9 @@ def classify_and_update_parameters(self, parameters: Union[Parameters, Dict]): self._unprocessed_parameter_set = self._parameter_set self.geometry = self.pybamm_model.default_geometry + # Update the list of parameter names and number of parameters + self._n_parameters = len(self.parameters) + def reinit( self, inputs: Inputs, t: float = 0.0, x: Optional[np.ndarray] = None ) -> TimeSeriesState: @@ -284,15 +288,14 @@ def reinit( if self._built_model is None: raise ValueError("Model must be built before calling reinit") - if not isinstance(inputs, dict): - inputs = self.parameters.as_dict(inputs) + inputs = self.parameters.verify(inputs) self._solver.set_up(self._built_model, inputs=inputs) if x is None: x = self._built_model.y0 - sol = pybamm.Solution([np.array([t])], [x], self._built_model, inputs) + sol = pybamm.Solution([np.asarray([t])], [x], self._built_model, inputs) return TimeSeriesState(sol=sol, inputs=inputs, t=t) @@ -303,7 +306,7 @@ def get_state(self, 
inputs: Inputs, t: float, x: np.ndarray) -> TimeSeriesState: if self._built_model is None: raise ValueError("Model must be built before calling get_state") - sol = pybamm.Solution([np.array([t])], [x], self._built_model, inputs) + sol = pybamm.Solution([np.asarray([t])], [x], self._built_model, inputs) return TimeSeriesState(sol=sol, inputs=inputs, t=t) @@ -332,9 +335,8 @@ def simulate( Parameters ---------- - inputs : dict or array-like - The input parameters for the simulation. If array-like, it will be - converted to a dictionary using the model's fit keys. + inputs : Inputs + The input parameters for the simulation. t_eval : array-like An array of time points at which to evaluate the solution. @@ -348,6 +350,8 @@ def simulate( ValueError If the model has not been built before simulation. """ + inputs = self.parameters.verify(inputs) + if self._built_model is None: raise ValueError("Model must be built before calling simulate") else: @@ -355,9 +359,6 @@ def simulate( sol = self.solver.solve(self.built_model, t_eval=t_eval) else: - if not isinstance(inputs, dict): - inputs = self.parameters.as_dict(inputs) - if self.check_params( inputs=inputs, allow_infeasible_solutions=self.allow_infeasible_solutions, @@ -385,9 +386,8 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array): Parameters ---------- - inputs : dict or array-like - The input parameters for the simulation. If array-like, it will be - converted to a dictionary using the model's fit keys. + inputs : Inputs + The input parameters for the simulation. t_eval : array-like An array of time points at which to evaluate the solution and its sensitivities. @@ -402,6 +402,7 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array): ValueError If the model has not been built before simulation. """ + inputs = self.parameters.verify(inputs) if self._built_model is None: raise ValueError("Model must be built before calling simulate") @@ -411,9 +412,6 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array): "Cannot use sensitivies for parameters which require a model rebuild" ) - if not isinstance(inputs, dict): - inputs = self.parameters.as_dict(inputs) - if self.check_params( inputs=inputs, allow_infeasible_solutions=self.allow_infeasible_solutions, @@ -432,7 +430,7 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array): ( sol[self.signal[0]].data.shape[0], self.n_outputs, - self.n_parameters, + self._n_parameters, ) ) @@ -470,10 +468,9 @@ def predict( Parameters ---------- - inputs : dict or array-like, optional - Input parameters for the simulation. If the input is array-like, it is converted - to a dictionary using the model's fitting keys. Defaults to None, indicating - that the default parameters should be used. + inputs : Inputs, optional + Input parameters for the simulation. Defaults to None, indicating that the + default parameters should be used. t_eval : array-like, optional An array of time points at which to evaluate the solution. Defaults to None, which means the time points need to be specified within experiment or elsewhere. @@ -499,13 +496,13 @@ def predict( if PyBaMM models are not supported by the current simulation method. 
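+
+        A minimal call sketch (the time points are illustrative):
+        ``sol = model.predict(t_eval=np.linspace(0, 600, 601))``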
""" + inputs = self.parameters.verify(inputs) + if not self.pybamm_model._built: self.pybamm_model.build_model() parameter_set = parameter_set or self._unprocessed_parameter_set if inputs is not None: - if not isinstance(inputs, dict): - inputs = self.parameters.as_dict(inputs) parameter_set.update(inputs) if self.check_params( @@ -544,7 +541,7 @@ def check_params( Parameters ---------- - inputs : dict + inputs : Inputs The input parameters for the simulation. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). @@ -555,17 +552,7 @@ def check_params( A boolean which signifies whether the parameters are compatible. """ - if inputs is not None: - if not isinstance(inputs, dict): - if isinstance(inputs, list): - for entry in inputs: - if not isinstance(entry, (int, float)): - raise ValueError( - "Expecting inputs in the form of a dictionary, numeric list" - + f" or None, but received a list with type: {type(inputs)}" - ) - else: - inputs = self.parameters.as_dict(inputs) + inputs = self.parameters.verify(inputs) return self._check_params( inputs=inputs, allow_infeasible_solutions=allow_infeasible_solutions @@ -580,7 +567,7 @@ def _check_params( Parameters ---------- - inputs : dict + inputs : Inputs The input parameters for the simulation. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). @@ -641,7 +628,7 @@ def cell_volume(self, parameter_set: ParameterSet = None): """ raise NotImplementedError - def approximate_capacity(self, x): + def approximate_capacity(self, inputs: Inputs): """ Calculate a new estimate for the nominal capacity based on the theoretical energy density and an average voltage. @@ -650,8 +637,8 @@ def approximate_capacity(self, x): Parameters ---------- - x : array-like - An array of values representing the model inputs. + inputs : Inputs + The parameters that are the inputs of the model. Raises ------ diff --git a/pybop/models/empirical/base_ecm.py b/pybop/models/empirical/base_ecm.py index 8d15442d..38d94d14 100644 --- a/pybop/models/empirical/base_ecm.py +++ b/pybop/models/empirical/base_ecm.py @@ -1,4 +1,4 @@ -from pybop.models.base_model import BaseModel +from pybop.models.base_model import BaseModel, Inputs class ECircuitModel(BaseModel): @@ -52,14 +52,14 @@ def __init__( # Correct OCP if set to default if ( parameter_set is not None - and "Open-circuit voltage [V]" in parameter_set.params + and "Open-circuit voltage [V]" in parameter_set.keys() ): default_ocp = self.pybamm_model.default_parameter_values[ "Open-circuit voltage [V]" ] - if parameter_set.params["Open-circuit voltage [V]"] == "default": + if parameter_set["Open-circuit voltage [V]"] == "default": print("Setting open-circuit voltage to default function") - parameter_set.params["Open-circuit voltage [V]"] = default_ocp + parameter_set["Open-circuit voltage [V]"] = default_ocp super().__init__(name=name, parameter_set=parameter_set) @@ -85,13 +85,13 @@ def __init__( self._disc = None self.geometric_parameters = {} - def _check_params(self, inputs=None, allow_infeasible_solutions=True): + def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True): """ Check the compatibility of the model parameters. Parameters ---------- - inputs : dict + inputs : Inputs The input parameters for the simulation. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). 
diff --git a/pybop/models/empirical/ecm.py b/pybop/models/empirical/ecm.py index 031da3fd..893b30bc 100644 --- a/pybop/models/empirical/ecm.py +++ b/pybop/models/empirical/ecm.py @@ -1,6 +1,7 @@ from pybamm import equivalent_circuit as pybamm_equivalent_circuit from pybop.models.empirical.base_ecm import ECircuitModel +from pybop.parameters.parameter import Inputs class Thevenin(ECircuitModel): @@ -44,13 +45,13 @@ def __init__( pybamm_model=pybamm_equivalent_circuit.Thevenin, name=name, **model_kwargs ) - def _check_params(self, inputs=None, allow_infeasible_solutions=True): + def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True): """ Check the compatibility of the model parameters. Parameters ---------- - inputs : dict + inputs : Inputs The input parameters for the simulation. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). diff --git a/pybop/models/lithium_ion/base_echem.py b/pybop/models/lithium_ion/base_echem.py index 6947774b..fd4aa268 100644 --- a/pybop/models/lithium_ion/base_echem.py +++ b/pybop/models/lithium_ion/base_echem.py @@ -2,7 +2,7 @@ from pybamm import lithium_ion as pybamm_lithium_ion -from pybop.models.base_model import BaseModel +from pybop.models.base_model import BaseModel, Inputs class EChemBaseModel(BaseModel): @@ -47,10 +47,7 @@ def __init__( solver=None, **model_kwargs, ): - super().__init__( - name=name, - parameter_set=parameter_set, - ) + super().__init__(name=name, parameter_set=parameter_set) model_options = dict(build=False) for key, value in model_kwargs.items(): @@ -88,14 +85,14 @@ def __init__( self.geometric_parameters = self.set_geometric_parameters() def _check_params( - self, inputs=None, parameter_set=None, allow_infeasible_solutions=True + self, inputs: Inputs = None, parameter_set=None, allow_infeasible_solutions=True ): """ Check compatibility of the model parameters. Parameters ---------- - inputs : dict + inputs : Inputs The input parameters for the simulation. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). @@ -267,7 +264,7 @@ def area_density(thickness, mass_density): ) return cross_sectional_area * total_area_density - def approximate_capacity(self, x): + def approximate_capacity(self, inputs: Inputs): """ Calculate and update an estimate for the nominal cell capacity based on the theoretical energy density and an average voltage. @@ -277,14 +274,22 @@ def approximate_capacity(self, x): Parameters ---------- - x : array-like - An array of values representing the model inputs. + inputs : Inputs + The parameters that are the inputs of the model. Returns ------- None The nominal cell capacity is updated directly in the model's parameter set. 
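+
+        A sketch of the estimate formed below: the theoretical energy is
+        divided by an average voltage derived from the electrode OCPs,
+        ``capacity = theoretical_energy / average_voltage``.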
""" + inputs = self.parameters.verify(inputs) + self._parameter_set.update(inputs) + + # Calculate theoretical energy density + theoretical_energy = self._electrode_soh.calculate_theoretical_energy( + self._parameter_set + ) + # Extract stoichiometries and compute mean values ( min_sto_neg, @@ -295,16 +300,6 @@ def approximate_capacity(self, x): mean_sto_neg = (min_sto_neg + max_sto_neg) / 2 mean_sto_pos = (min_sto_pos + max_sto_pos) / 2 - inputs = { - key: x[i] for i, key in enumerate([param.name for param in self.parameters]) - } - self._parameter_set.update(inputs) - - # Calculate theoretical energy density - theoretical_energy = self._electrode_soh.calculate_theoretical_energy( - self._parameter_set - ) - # Calculate average voltage positive_electrode_ocp = self._parameter_set["Positive electrode OCP [V]"] negative_electrode_ocp = self._parameter_set["Negative electrode OCP [V]"] diff --git a/pybop/observers/observer.py b/pybop/observers/observer.py index 162d03de..1c35c25d 100644 --- a/pybop/observers/observer.py +++ b/pybop/observers/observer.py @@ -50,16 +50,15 @@ def __init__( if model.signal is None: model.signal = self.signal - inputs = dict() - for param in self.parameters: - inputs[param.name] = param.value - + inputs = self.parameters.as_dict("initial") self._state = model.reinit(inputs) self._model = model self._signal = self.signal self._n_outputs = len(self._signal) def reset(self, inputs: Inputs) -> None: + inputs = self.parameters.verify(inputs) + self._state = self._model.reinit(inputs) def observe(self, time: float, value: Optional[np.ndarray] = None) -> float: @@ -96,6 +95,8 @@ def log_likelihood(self, values: dict, times: np.ndarray, inputs: Inputs) -> flo inputs : Inputs The inputs to the model. """ + inputs = self.parameters.verify(inputs) + if self._n_outputs == 1: signal = self._signal[0] if len(values[signal]) != len(times): @@ -134,7 +135,7 @@ def get_current_covariance(self) -> Covariance: def get_measure(self, x: TimeSeriesState) -> np.ndarray: measures = [x.sol[s].data[-1] for s in self._signal] - return np.array([[m] for m in measures]) + return np.asarray([[m] for m in measures]) def get_current_time(self) -> float: """ @@ -142,27 +143,20 @@ def get_current_time(self) -> float: """ return self._state.t - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- y : np.ndarray - The model output y(t) simulated with inputs x. + The model output y(t) simulated with given inputs. """ - inputs = dict() - if isinstance(x, Parameters): - for param in x: - inputs[param.name] = param.value - else: # x is an array of parameter values - for i, param in enumerate(self.parameters): - inputs[param.name] = x[i] self.reset(inputs) output = {} diff --git a/pybop/observers/unscented_kalman.py b/pybop/observers/unscented_kalman.py index b7ea0f35..afbc2a01 100644 --- a/pybop/observers/unscented_kalman.py +++ b/pybop/observers/unscented_kalman.py @@ -15,8 +15,8 @@ class UnscentedKalmanFilterObserver(Observer): Parameters ---------- - parameters: List[Parameters] - The inputs to the model. + parameters: Parameters + The parameters for the model. model : BaseModel The model to observe. 
sigma0 : np.ndarray | float @@ -118,7 +118,7 @@ def observe(self, time: float, value: np.ndarray) -> float: if value is None: raise ValueError("Measurement must be provided.") elif isinstance(value, np.floating): - value = np.array([value]) + value = np.asarray([value]) dt = time - self.get_current_time() if dt < 0: @@ -201,7 +201,7 @@ def __init__( zero_cols = np.logical_and(np.all(P0 == 0, axis=1), np.all(Rp == 0, axis=1)) zeros = np.logical_and(zero_rows, zero_cols) ones = np.logical_not(zeros) - states = np.array(range(len(x0)))[ones] + states = np.asarray(range(len(x0)))[ones] bool_mask = np.ix_(ones, ones) S_filtered = linalg.cholesky(P0[ones, :][:, ones]) @@ -276,11 +276,11 @@ def gen_sigma_points( # Define the weights of the sigma points w_m0 = sigma / (L + sigma) - w_m = np.array([w_m0] + [1 / (2 * (L + sigma))] * (2 * L)) + w_m = np.asarray([w_m0] + [1 / (2 * (L + sigma))] * (2 * L)) # Define the weights of the covariance of the sigma points w_c0 = w_m0 + (1 - alpha**2 + beta) - w_c = np.array([w_c0] + [1 / (2 * (L + sigma))] * (2 * L)) + w_c = np.asarray([w_c0] + [1 / (2 * (L + sigma))] * (2 * L)) return (points, w_m, w_c) diff --git a/pybop/optimisers/_cuckoo.py b/pybop/optimisers/_cuckoo.py new file mode 100644 index 00000000..34605272 --- /dev/null +++ b/pybop/optimisers/_cuckoo.py @@ -0,0 +1,197 @@ +import numpy as np +from pints import PopulationBasedOptimiser +from scipy.special import gamma + + +class CuckooSearchImpl(PopulationBasedOptimiser): + """ + Cuckoo Search (CS) optimisation algorithm, inspired by the brood parasitism + of some cuckoo species. This algorithm was introduced by Yang and Deb in 2009. + + The algorithm uses a population of host nests (solutions), where each cuckoo + (new solution) tries to replace a worse nest in the population. The quality + or fitness of the nests is determined by the cost function. A fraction + of the worst nests is abandoned at each generation, and new ones are built + randomly. + + The pseudo-code for the Cuckoo Search is as follows: + + 1. Initialise population of n host nests + 2. While (t < max_generations): + a. Get a cuckoo randomly by Lévy flights + b. Evaluate its quality/fitness F + c. Choose a nest among n (say, j) randomly + d. If (F > fitness of j): + i. Replace j with the new solution + e. Abandon a fraction (pa) of the worst nests and build new ones + f. Keep the best solutions/nests + g. Rank the solutions and find the current best + 3. End While + + This implementation also uses a decreasing step size for the Lévy flights, calculated + as sigma = sigma0 / sqrt(iterations), where sigma0 is the initial step size and + iterations is the current iteration number. + + Parameters: + - pa: Probability of discovering alien eggs/solutions (abandoning rate) + + References: + - X. -S. Yang and Suash Deb, "Cuckoo Search via Lévy flights," + 2009 World Congress on Nature & Biologically Inspired Computing (NaBIC), + Coimbatore, India, 2009, pp. 210-214, https://doi.org/10.1109/NABIC.2009.5393690. + + - S. Walton, O. Hassan, K. Morgan, M.R. Brown, + Modified cuckoo search: A new gradient free optimisation algorithm, + Chaos, Solitons & Fractals, Volume 44, Issue 9, 2011, + Pages 710-718, ISSN 0960-0779, + https://doi.org/10.1016/j.chaos.2011.06.004. 
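+
+    Lévy steps are generated with Mantegna's algorithm (see ``levy_flight``):
+    u ~ N(0, sigma_u**2), v ~ N(0, 1), step = u / abs(v)**(1 / alpha), where
+    sigma_u = (gamma(1 + alpha) * sin(pi * alpha / 2)
+    / (gamma((1 + alpha) / 2) * alpha * 2**((alpha - 1) / 2)))**(1 / alpha).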
+ """ + + def __init__(self, x0, sigma0=0.05, boundaries=None, pa=0.25): + super().__init__(x0, sigma0, boundaries=boundaries) + + # Problem dimensionality + self._dim = len(x0) + + # Population size and abandon rate + self._n = self._population_size + self._pa = pa + self.step_size = self._sigma0 + self.beta = 1.5 + + # Set states + self._running = False + self._ready_for_tell = False + + # Initialise nests + if self._boundaries: + self._nests = np.random.uniform( + low=self._boundaries.lower(), + high=self._boundaries.upper(), + size=(self._n, self._dim), + ) + else: + self._nests = np.random.normal( + self._x0, self._sigma0, size=(self._n, self._dim) + ) + + self._fitness = np.full(self._n, np.inf) + + # Initialise best solutions + self._x_best = np.copy(x0) + self._f_best = np.inf + + # Set iteration count + self._iterations = 0 + + def ask(self): + """ + Returns a list of next points in the parameter-space + to evaluate from the optimiser. + """ + # Set flag to indicate that the optimiser is ready to receive replies + self._ready_for_tell = True + self._running = True + + # Generate new solutions (cuckoos) by Lévy flights + self.step_size = self._sigma0 / max(1, np.sqrt(self._iterations)) + step = self.levy_flight(self.beta, self._dim) * self.step_size + self.cuckoos = self._nests + step + return self.clip_nests(self.cuckoos) + + def tell(self, replies): + """ + Receives a list of function values from the cost function from points + previously specified by `self.ask()`, and updates the optimiser state + accordingly. + """ + # Update iteration count + self._iterations += 1 + + # Compare cuckoos with current nests + for i in range(self._n): + f_new = replies[i] + if f_new < self._fitness[i]: + self._nests[i] = self.cuckoos[i] + self._fitness[i] = f_new + if f_new < self._f_best: + self._f_best = f_new + self._x_best = self.cuckoos[i] + + # Abandon some worse nests + n_abandon = int(self._pa * self._n) + worst_nests = np.argsort(self._fitness)[-n_abandon:] + for idx in worst_nests: + self.abandon_nests(idx) + self._fitness[idx] = np.inf # reset fitness + + def levy_flight(self, alpha, size): + """ + Generate step sizes via the Mantegna's algorithm for Levy flights + """ + from numpy import pi, power, random, sin + + sigma_u = power( + (gamma(1 + alpha) * sin(pi * alpha / 2)) + / (gamma((1 + alpha) / 2) * alpha * power(2, (alpha - 1) / 2)), + 1 / alpha, + ) + sigma_v = 1 + + u = random.normal(0, sigma_u, size=size) + v = random.normal(0, sigma_v, size=size) + step = u / power(abs(v), 1 / alpha) + + return step + + def abandon_nests(self, idx): + """ + Updates the nests to abandon the worst performers and reinitialise. + """ + if self._boundaries: + self._nests[idx] = np.random.uniform( + low=self._boundaries.lower(), + high=self._boundaries.upper(), + ) + else: + self._nests[idx] = np.random.normal(self._x0, self._sigma0) + + def clip_nests(self, x): + """ + Clip the input array to the boundaries if available. + """ + if self._boundaries: + x = np.clip(x, self._boundaries.lower(), self._boundaries.upper()) + return x + + def _suggested_population_size(self): + """ + Inherited from Pints:PopulationBasedOptimiser. + Returns a suggested population size, based on the + dimension of the parameter space. + """ + return 4 + int(3 * np.log(self._n_parameters)) + + def running(self): + """ + Returns ``True`` if the optimisation is in progress. + """ + return self._running + + def x_best(self): + """ + Returns the best parameter values found so far. 
+ """ + return self._x_best + + def f_best(self): + """ + Returns the best score found so far. + """ + return self._f_best + + def name(self): + """ + Returns the name of the optimiser. + """ + return "Cuckoo Search" diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 921bf254..0286137f 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -2,7 +2,7 @@ import numpy as np -from pybop import BaseCost, BaseLikelihood, DesignCost +from pybop import BaseCost, BaseLikelihood, DesignCost, Parameter, Parameters class BaseOptimiser: @@ -50,6 +50,7 @@ def __init__( **optimiser_kwargs, ): # First set attributes to default values + self.parameters = Parameters() self.x0 = None self.bounds = None self.sigma0 = 0.1 @@ -64,29 +65,26 @@ def __init__( if isinstance(cost, BaseCost): self.cost = cost - self.x0 = cost.x0 + self.transformation = self.cost.transformation + self.parameters.join(cost.parameters) self.set_allow_infeasible_solutions() if isinstance(cost, (BaseLikelihood, DesignCost)): self.minimising = False - # Set default bounds (for all or no parameters) - self.bounds = cost.parameters.get_bounds() - - # Set default initial standard deviation (for all or no parameters) - self.sigma0 = cost.parameters.get_sigma0() or self.sigma0 - - # Set transformation - self.transformation = cost.transformation - else: try: - cost_test = cost(optimiser_kwargs.get("x0", [])) + self.x0 = optimiser_kwargs.get("x0", []) + cost_test = cost(self.x0) warnings.warn( "The cost is not an instance of pybop.BaseCost, but let's continue " + "assuming that it is a callable function to be minimised.", UserWarning, ) self.cost = cost + for i, value in enumerate(self.x0): + self.parameters.add( + Parameter(name=f"Parameter {i}", initial_value=value) + ) self.minimising = True except Exception: @@ -97,6 +95,9 @@ def __init__( f"Cost returned {type(cost_test)}, not a scalar numeric value." ) + if len(self.parameters) == 0: + raise ValueError("There are no parameters to optimise.") + self.unset_options = optimiser_kwargs self.set_base_options() self._set_up_optimiser() @@ -113,9 +114,19 @@ def set_base_options(self): """ Update the base optimiser options and remove them from the options dictionary. """ - self.x0 = self.unset_options.pop("x0", self.x0) - self.bounds = self.unset_options.pop("bounds", self.bounds) - self.sigma0 = self.unset_options.pop("sigma0", self.sigma0) + # Set initial values, if x0 is None, initial values are unmodified. 
+ self.parameters.update(initial_values=self.unset_options.pop("x0", None)) + self.x0 = self.parameters.initial_value() + + # Set default bounds (for all or no parameters) + self.bounds = self.unset_options.pop("bounds", self.parameters.get_bounds()) + + # Set default initial standard deviation (for all or no parameters) + self.sigma0 = self.unset_options.pop( + "sigma0", self.parameters.get_sigma0() or self.sigma0 + ) + + # Set other options self.verbose = self.unset_options.pop("verbose", self.verbose) self.minimising = self.unset_options.pop("minimising", self.minimising) self.transformation = self.unset_options.pop( @@ -154,8 +165,7 @@ def run(self): # Store the optimised parameters x = self.result.x - if hasattr(self.cost, "parameters"): - self.store_optimised_parameters(x) + self.parameters.update(values=x) # Check if parameters are viable if self.physical_viability: @@ -193,8 +203,12 @@ def store_optimised_parameters(self, x): def check_optimal_parameters(self, x): """ Check if the optimised parameters are physically viable. - """ + Parameters + ---------- + x : array-like + Optimised parameter values. + """ if self.cost.problem._model.check_params( inputs=x, allow_infeasible_solutions=False ): diff --git a/pybop/optimisers/base_pints_optimiser.py b/pybop/optimisers/base_pints_optimiser.py index 84a0ba71..cb8abd91 100644 --- a/pybop/optimisers/base_pints_optimiser.py +++ b/pybop/optimisers/base_pints_optimiser.py @@ -135,22 +135,20 @@ def _sanitise_inputs(self): # Convert bounds to PINTS boundaries if self.bounds is not None: - if issubclass( - self.pints_optimiser, - (PintsGradientDescent, PintsAdam, PintsNelderMead), - ): + ignored_optimisers = (PintsGradientDescent, PintsAdam, PintsNelderMead) + if issubclass(self.pints_optimiser, ignored_optimisers): print(f"NOTE: Boundaries ignored by {self.pints_optimiser}") self.bounds = None - elif issubclass(self.pints_optimiser, PintsPSO): - if not all( - np.isfinite(value) - for sublist in self.bounds.values() - for value in sublist - ): - raise ValueError( - "Either all bounds or no bounds must be set for Pints PSO." - ) else: + if issubclass(self.pints_optimiser, PintsPSO): + if not all( + np.isfinite(value) + for sublist in self.bounds.values() + for value in sublist + ): + raise ValueError( + f"Either all bounds or no bounds must be set for {self.pints_optimiser.__name__}." + ) self._boundaries = PintsRectangularBoundaries( self.bounds["lower"], self.bounds["upper"] ) diff --git a/pybop/optimisers/pints_optimisers.py b/pybop/optimisers/pints_optimisers.py index 4872973a..64b77b67 100644 --- a/pybop/optimisers/pints_optimisers.py +++ b/pybop/optimisers/pints_optimisers.py @@ -9,7 +9,7 @@ from pints import IRPropMin as PintsIRPropMin from pints import NelderMead as PintsNelderMead -from pybop import AdamWImpl, BasePintsOptimiser +from pybop import AdamWImpl, BasePintsOptimiser, CuckooSearchImpl class GradientDescent(BasePintsOptimiser): @@ -268,10 +268,38 @@ class CMAES(BasePintsOptimiser): """ def __init__(self, cost, **optimiser_kwargs): - x0 = optimiser_kwargs.pop("x0", cost.x0) - if x0 is not None and len(x0) == 1: + x0 = optimiser_kwargs.get("x0", cost.parameters.initial_value()) + if len(x0) == 1 or len(cost.parameters) == 1: raise ValueError( "CMAES requires optimisation of >= 2 parameters at once. " + "Please choose another optimiser." ) super().__init__(cost, PintsCMAES, **optimiser_kwargs) + + +class CuckooSearch(BasePintsOptimiser): + """ + Adapter for the Cuckoo Search optimiser in PyBOP. 
+
+    Cuckoo Search is a population-based optimisation algorithm inspired by the brood parasitism of some cuckoo species.
+    It is designed to be simple, efficient, and robust, and is suitable for global optimisation problems.
+
+    Parameters
+    ----------
+    **optimiser_kwargs : optional
+        Valid PyBOP option keys and their values, for example:
+        x0 : array_like
+            Initial parameter values.
+        sigma0 : float
+            Initial step size.
+        bounds : dict
+            A dictionary with 'lower' and 'upper' keys containing arrays for lower and
+            upper bounds on the parameters.
+
+    See Also
+    --------
+    pybop.CuckooSearchImpl : The PyBOP implementation of the Cuckoo Search algorithm.
+    """
+
+    def __init__(self, cost, **optimiser_kwargs):
+        super().__init__(cost, CuckooSearchImpl, **optimiser_kwargs)
diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py
index 84342304..544abfc8 100644
--- a/pybop/optimisers/scipy_optimisers.py
+++ b/pybop/optimisers/scipy_optimisers.py
@@ -162,8 +162,8 @@ def callback(intermediate_result: OptimizeResult):
         self._cost0 = np.abs(self.cost(self.x0))
         if np.isinf(self._cost0):
             for i in range(1, self.num_resamples):
-                x0 = self.cost.parameters.rvs(1)
-                self._cost0 = np.abs(self.cost(x0))
+                self.x0 = self.parameters.rvs(1)[0]
+                self._cost0 = np.abs(self.cost(self.x0))
                 if not np.isinf(self._cost0):
                     break
             if np.isinf(self._cost0):
diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py
index 94549a31..c82b67cc 100644
--- a/pybop/parameters/parameter.py
+++ b/pybop/parameters/parameter.py
@@ -1,8 +1,13 @@
+import warnings
 from collections import OrderedDict
-from typing import Dict, List
+from typing import Dict, List, Union
 
 import numpy as np
 
+from pybop._utils import is_numeric
+
+Inputs = Dict[str, float]
+
 
 class Parameter:
     """
@@ -49,6 +54,7 @@ def __init__(
         self.initial_value = initial_value
         self.value = initial_value
         self.transformation = transformation
+        self.applied_prior_bounds = False
         self.margin = 1e-4
         self.set_bounds(bounds)
@@ -83,7 +90,7 @@ def rvs(self, n_samples, random_state=None):
 
         return samples
 
-    def update(self, value=None, initial_value=None):
+    def update(self, initial_value=None, value=None):
         """
         Update the parameter's current value.
 
         Parameters
         ----------
+        initial_value : float, optional
+            The new initial value (and value) to be assigned to the parameter.
         value : float
             The new value to be assigned to the parameter.
         """
-        if value is not None:
-            self.value = value
-        elif initial_value is not None:
+        if initial_value is not None:
             self.initial_value = initial_value
             self.value = initial_value
-        else:
+        if value is not None:
+            self.value = value
+        if initial_value is None and value is None:
             raise ValueError("No value provided to update parameter")
 
     def __repr__(self):
@@ -133,7 +140,7 @@ def set_margin(self, margin):
 
         self.margin = margin
 
-    def set_bounds(self, bounds=None):
+    def set_bounds(self, bounds=None, boundary_multiplier=6):
        """
        Set the upper and lower bounds.
 
@@ -142,6 +149,11 @@
         bounds : tuple, optional
             A tuple defining the lower and upper bounds for the parameter.
             Defaults to None.
+        boundary_multiplier : float, optional
+            Used to define the bounds when no bounds are passed but the parameter has
+            a prior distribution (default: 6).
+            For example, a parameter with a Gaussian(0.6, 0.02) prior and no
+            explicit bounds receives [0.48, 0.72] (i.e. mean ± 6 * sigma).
 
         Raises
         ------
@@ -162,9 +172,26 @@
         else:
             self.lower_bound = bounds[0]
             self.upper_bound = bounds[1]
-            self.bounds = [self.lower_bound, self.upper_bound]
-        else:
-            self.bounds = bounds
+            bounds = [self.lower_bound, self.upper_bound]
+
+        elif self.prior is not None:
+            self.applied_prior_bounds = True
+            self.lower_bound = self.prior.mean - boundary_multiplier * self.prior.sigma
+            self.upper_bound = self.prior.mean + boundary_multiplier * self.prior.sigma
+            bounds = [self.lower_bound, self.upper_bound]
+            print("Default bounds applied based on prior distribution.")
+
+        self.bounds = bounds
+
+    def get_initial_value(self) -> float:
+        """
+        Return the initial value of the parameter.
+        """
+        if self.initial_value is None:
+            sample = self.rvs(1)
+            self.update(initial_value=sample[0])
+
+        return self.initial_value
 
 
 class Parameters:
     """
@@ -199,7 +226,15 @@ def __getitem__(self, key: str) -> Parameter:
         -------
         pybop.Parameter
             The Parameter object.
+
+        Raises
+        ------
+        ValueError
+            The key must be the name of one of the parameters.
         """
+        if key not in self.param.keys():
+            raise ValueError(f"The key {key} is not the name of a parameter.")
+
         return self.param[key]
 
     def __len__(self) -> int:
@@ -259,6 +294,21 @@ def remove(self, parameter_name):
         # Remove the parameter
         self.param.pop(parameter_name)
 
+    def join(self, parameters=None):
+        """
+        Join two Parameters objects into the first by copying across each Parameter.
+
+        Parameters
+        ----------
+        parameters : pybop.Parameters
+            The Parameters object providing the Parameter objects to copy across.
+        """
+        for param in parameters:
+            if param not in self.param.values():
+                self.add(param)
+            else:
+                print(f"Discarding duplicate {param.name}.")
+
     def get_bounds(self) -> Dict:
         """
         Get bounds, for either all or no parameters.
@@ -279,12 +328,20 @@ def get_bounds(self) -> Dict:
 
         return bounds
 
-    def update(self, values):
+    def update(self, initial_values=None, values=None, bounds=None):
         """
-        Set value of each parameter.
+        Set the initial value, value and/or bounds of each parameter.
         """
         for i, param in enumerate(self.param.values()):
-            param.update(value=values[i])
+            if initial_values is not None:
+                param.update(initial_value=initial_values[i])
+            if values is not None:
+                param.update(value=values[i])
+            if bounds is not None:
+                if isinstance(bounds, Dict):
+                    param.set_bounds(bounds=[bounds["lower"][i], bounds["upper"][i]])
+                else:
+                    param.set_bounds(bounds=bounds[i])
 
     def rvs(self, n_samples: int) -> List:
         """
@@ -344,7 +401,7 @@ def get_sigma0(self) -> List:
             sigma0 = None
         return sigma0
 
-    def initial_value(self) -> List:
+    def initial_value(self) -> np.ndarray:
         """
         Return the initial value of each parameter.
         """
@@ -352,13 +409,13 @@ def initial_value(self) -> List:
 
         for param in self.param.values():
             if param.initial_value is None and param.prior is not None:
-                initial_value = param.rvs(1)
-                param.update(initial_value=initial_value[0])
+                initial_value = param.rvs(1)[0]
+                param.update(initial_value=initial_value)
             initial_values.append(param.initial_value)
 
-        return initial_values
+        return np.asarray(initial_values)
 
-    def current_value(self) -> List:
+    def current_value(self) -> np.ndarray:
         """
         Return the current value of each parameter.
         """
@@ -367,9 +424,9 @@ def current_value(self) -> List:
 
         for param in self.param.values():
             current_values.append(param.value)
 
-        return current_values
+        return np.asarray(current_values)
 
-    def true_value(self) -> List:
+    def true_value(self) -> np.ndarray:
         """
         Return the true value of each parameter.
         """
         true_values = []
 
         for param in self.param.values():
             true_values.append(param.true_value)
 
-        return true_values
+        return np.asarray(true_values)
 
     def get_transformations(self):
         """
@@ -403,7 +460,14 @@ def get_bounds_for_plotly(self):
         bounds = np.empty((len(self), 2))
 
         for i, param in enumerate(self.param.values()):
+            if param.applied_prior_bounds:
+                warnings.warn(
+                    "Bounds were created from prior distributions. "
+                    "Please provide bounds for better plotting results.",
+                    UserWarning,
+                    stacklevel=2,
+                )
             if param.bounds is not None:
                 bounds[i] = param.bounds
             else:
                 raise ValueError("All parameters require bounds for plotting.")
@@ -411,6 +475,44 @@ def get_bounds_for_plotly(self):
         return bounds
 
     def as_dict(self, values=None) -> Dict:
+        """
+        Return the parameter values as an ``Inputs`` dictionary.
+
+        Parameters
+        ----------
+        values : list or str, optional
+            A list of parameter values or one of the strings "initial" or "true" which can be used
+            to obtain a dictionary of parameters.
+
+        Returns
+        -------
+        Inputs
+            A parameters dictionary.
+        """
         if values is None:
             values = self.current_value()
+        elif isinstance(values, str):
+            if values == "initial":
+                values = self.initial_value()
+            elif values == "true":
+                values = self.true_value()
         return {key: values[i] for i, key in enumerate(self.param.keys())}
+
+    def verify(self, inputs: Union[Inputs, None] = None):
+        """
+        Verify that the inputs are an Inputs dictionary or numeric values
+        which can be used to construct an Inputs dictionary.
+
+        Parameters
+        ----------
+        inputs : Inputs or numeric
+        """
+        if inputs is None or isinstance(inputs, Dict):
+            return inputs
+        elif (isinstance(inputs, list) and all(is_numeric(x) for x in inputs)) or all(
+            is_numeric(x) for x in list(inputs)
+        ):
+            return self.as_dict(inputs)
+        else:
+            raise TypeError(
+                f"Inputs must be a dictionary or numeric. Received {type(inputs)}"
+            )
diff --git a/pybop/parameters/parameter_set.py b/pybop/parameters/parameter_set.py
index a1ab63cd..43f3e999 100644
--- a/pybop/parameters/parameter_set.py
+++ b/pybop/parameters/parameter_set.py
@@ -1,7 +1,8 @@
 import json
 import types
+from typing import List
 
-from pybamm import ParameterValues, parameter_sets
+from pybamm import LithiumIonParameters, ParameterValues, parameter_sets
 
 
 class ParameterSet:
@@ -34,6 +35,12 @@ def __setitem__(self, key, value):
     def __getitem__(self, key):
         return self.params[key]
 
+    def keys(self) -> List:
+        """
+        Returns a list of parameter names.
+        """
+        return list(self.params.keys())
+
     def import_parameters(self, json_path=None):
         """
         Imports parameters from a JSON file specified by the `json_path` attribute.
@@ -167,7 +174,7 @@ def is_json_serializable(self, value):
             return False
 
     @classmethod
-    def pybamm(cls, name):
+    def pybamm(cls, name, formation_concentrations=False):
         """
         Retrieves a PyBaMM parameter set by name.
 
@@ -175,6 +182,8 @@ def pybamm(cls, name):
         ----------
         name : str
             The name of the PyBaMM parameter set to retrieve.
+        formation_concentrations : bool, optional
+            If True, re-calculates the initial concentrations of lithium in the active material (default: False).
Returns ------- @@ -187,4 +196,44 @@ def pybamm(cls, name): if name not in list(parameter_sets): raise ValueError(msg) - return ParameterValues(name).copy() + parameter_set = ParameterValues(name).copy() + + if formation_concentrations: + set_formation_concentrations(parameter_set) + + return parameter_set + + +def set_formation_concentrations(parameter_set): + """ + Compute the concentration of lithium in the positive electrode assuming that + all lithium in the active material originated from the positive electrode. + + Parameters + ---------- + parameter_set : pybamm.ParameterValues + A PyBaMM parameter set containing standard lithium ion parameters. + """ + # Obtain the total amount of lithium in the active material + Q_Li_particles_init = parameter_set.evaluate( + LithiumIonParameters().Q_Li_particles_init + ) + + # Convert this total amount to a concentration in the positive electrode + c_init = ( + Q_Li_particles_init + * 3600 + / ( + parameter_set["Positive electrode active material volume fraction"] + * parameter_set["Positive electrode thickness [m]"] + * parameter_set["Electrode height [m]"] + * parameter_set["Electrode width [m]"] + * parameter_set["Faraday constant [C.mol-1]"] + ) + ) + + # Update the initial lithium concentrations + parameter_set.update({"Initial concentration in negative electrode [mol.m-3]": 0}) + parameter_set.update( + {"Initial concentration in positive electrode [mol.m-3]": c_init} + ) diff --git a/pybop/plotting/plot2d.py b/pybop/plotting/plot2d.py index 2e7d4f26..ee8d7057 100644 --- a/pybop/plotting/plot2d.py +++ b/pybop/plotting/plot2d.py @@ -1,4 +1,5 @@ import sys +import warnings import numpy as np from scipy.interpolate import griddata @@ -63,6 +64,24 @@ def plot2d( cost = cost_or_optim plot_optim = False + if len(cost.parameters) < 2: + raise ValueError("This cost function takes fewer than 2 parameters.") + + additional_values = [] + if len(cost.parameters) > 2: + warnings.warn( + "This cost function requires more than 2 parameters. 
" + + "Plotting in 2d with fixed values for the additional parameters.", + UserWarning, + ) + for ( + i, + param, + ) in enumerate(cost.parameters): + if i > 1: + additional_values.append(param.value) + print(f"Fixed {param.name}:", param.value) + # Set up parameter bounds if bounds is None: bounds = cost.parameters.get_bounds_for_plotly() @@ -77,19 +96,23 @@ def plot2d( # Populate cost matrix for i, xi in enumerate(x): for j, yj in enumerate(y): - costs[j, i] = cost(np.array([xi, yj])) + costs[j, i] = cost(np.asarray([xi, yj] + additional_values)) if gradient: grad_parameter_costs = [] # Determine the number of gradient outputs from cost.evaluateS1 - num_gradients = len(cost.evaluateS1(np.array([x[0], y[0]]))[1]) + num_gradients = len( + cost.evaluateS1(np.asarray([x[0], y[0]] + additional_values))[1] + ) # Create an array to hold each gradient output & populate grads = [np.zeros((len(y), len(x))) for _ in range(num_gradients)] for i, xi in enumerate(x): for j, yj in enumerate(y): - (*current_grads,) = cost.evaluateS1(np.array([xi, yj]))[1] + (*current_grads,) = cost.evaluateS1( + np.asarray([xi, yj] + additional_values) + )[1] for k, grad_output in enumerate(current_grads): grads[k][j, i] = grad_output @@ -103,7 +126,7 @@ def plot2d( flat_costs = costs.flatten() # Append the optimisation trace to the data - parameter_log = np.array(optim.log["x_best"]) + parameter_log = np.asarray(optim.log["x_best"]) flat_x = np.concatenate((flat_x, parameter_log[:, 0])) flat_y = np.concatenate((flat_y, parameter_log[:, 1])) flat_costs = np.concatenate((flat_costs, optim.log["cost"])) @@ -140,7 +163,9 @@ def plot2d( if plot_optim: # Plot the optimisation trace - optim_trace = np.array([item for sublist in optim.log["x"] for item in sublist]) + optim_trace = np.asarray( + [item[:2] for sublist in optim.log["x"] for item in sublist] + ) optim_trace = optim_trace.reshape(-1, 2) fig.add_trace( go.Scatter( diff --git a/pybop/plotting/plot_parameters.py b/pybop/plotting/plot_parameters.py index bc1f9a7a..94cb71cb 100644 --- a/pybop/plotting/plot_parameters.py +++ b/pybop/plotting/plot_parameters.py @@ -1,6 +1,6 @@ import sys -from pybop import StandardSubplot +from pybop import GaussianLogLikelihood, StandardSubplot def plot_parameters(optim, show=True, **layout_kwargs): @@ -51,6 +51,10 @@ def plot_parameters(optim, show=True, **layout_kwargs): for name in trace_names: axis_titles.append(("Function Call", name)) + if isinstance(optim.cost, GaussianLogLikelihood): + axis_titles.append(("Function Call", "Sigma")) + trace_names.append("Sigma") + # Set subplot layout options layout_options = dict( title="Parameter Convergence", diff --git a/pybop/plotting/plot_problem.py b/pybop/plotting/plot_problem.py index 968da94d..fb8759c9 100644 --- a/pybop/plotting/plot_problem.py +++ b/pybop/plotting/plot_problem.py @@ -3,9 +3,10 @@ import numpy as np from pybop import DesignProblem, FittingProblem, StandardPlot +from pybop.parameters.parameter import Inputs -def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs): +def quick_plot(problem, problem_inputs: Inputs = None, show=True, **layout_kwargs): """ Quickly plot the target dataset against optimised model output. @@ -16,7 +17,7 @@ def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs): ---------- problem : object Problem object with dataset and signal attributes. - parameter_values : array-like + problem_inputs : Inputs Optimised (or example) parameter values. 
show : bool, optional If True, the figure is shown upon creation (default: True). @@ -30,12 +31,14 @@ def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs): plotly.graph_objs.Figure The Plotly figure object for the scatter plot. """ - if parameter_values is None: - parameter_values = problem.x0 + if problem_inputs is None: + problem_inputs = problem.parameters.as_dict() + else: + problem_inputs = problem.parameters.verify(problem_inputs) # Extract the time data and evaluate the model's output and target values xaxis_data = problem.time_data() - model_output = problem.evaluate(parameter_values) + model_output = problem.evaluate(problem_inputs) target_output = problem.get_target() # Create a plot for each output diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index 48f53dab..4d9d8519 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -1,4 +1,5 @@ from pybop import BaseModel, Dataset, Parameter, Parameters +from pybop.parameters.parameter import Inputs class BaseProblem: @@ -65,21 +66,18 @@ def __init__( else: self.additional_variables = [] - # Set initial values - self.x0 = self.parameters.initial_value() - @property def n_parameters(self): return len(self.parameters) - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Raises ------ @@ -88,15 +86,15 @@ def evaluate(self, x): """ raise NotImplementedError - def evaluateS1(self, x): + def evaluateS1(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal and its derivatives. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Raises ------ diff --git a/pybop/problems/design_problem.py b/pybop/problems/design_problem.py index 3217ca95..a1efa22f 100644 --- a/pybop/problems/design_problem.py +++ b/pybop/problems/design_problem.py @@ -1,6 +1,7 @@ import numpy as np from pybop import BaseProblem +from pybop.parameters.parameter import Inputs class DesignProblem(BaseProblem): @@ -54,7 +55,7 @@ def __init__( # Build the model if required if experiment is not None: # Leave the build until later to apply the experiment - self._model.parameters = self.parameters + self._model.classify_and_update_parameters(self.parameters) elif self._model._built_model is None: self._model.build( @@ -65,27 +66,29 @@ def __init__( ) # Add an example dataset for plotting comparison - sol = self.evaluate(self.x0) + sol = self.evaluate(self.parameters.as_dict("initial")) self._time_data = sol["Time [s]"] self._target = {key: sol[key] for key in self.signal} self._dataset = None - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- y : np.ndarray - The model output y(t) simulated with inputs x. + The model output y(t) simulated with inputs. 
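Since every `evaluate` entry point now routes through `parameters.verify()`, both call forms below should be equivalent. A sketch assuming a one-parameter `FittingProblem` named `problem`:

```python
# Dictionary (Inputs) form passes through verify() unchanged
y = problem.evaluate({"Negative electrode active material volume fraction": 0.6})

# Numeric list form is converted to an Inputs dictionary internally
y = problem.evaluate([0.6])
```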
""" + inputs = self.parameters.verify(inputs) + sol = self._model.predict( - inputs=x, + inputs=inputs, experiment=self.experiment, init_soc=self.init_soc, ) diff --git a/pybop/problems/fitting_problem.py b/pybop/problems/fitting_problem.py index 15d1ed7e..b2795547 100644 --- a/pybop/problems/fitting_problem.py +++ b/pybop/problems/fitting_problem.py @@ -1,6 +1,7 @@ import numpy as np from pybop import BaseProblem +from pybop.parameters.parameter import Inputs class FittingProblem(BaseProblem): @@ -43,7 +44,7 @@ def __init__( parameters, model, check_model, signal, additional_variables, init_soc ) self._dataset = dataset.data - self.x = self.x0 + self.parameters.initial_value() # Check that the dataset contains time and current dataset.check(self.signal + ["Current function [A]"]) @@ -74,51 +75,61 @@ def __init__( init_soc=self.init_soc, ) - def evaluate(self, x): + def evaluate(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- y : np.ndarray - The model output y(t) simulated with inputs x. + The model output y(t) simulated with given inputs. """ - if np.any(x != self.x) and self._model.rebuild_parameters: - self.parameters.update(values=x) + inputs = self.parameters.verify(inputs) + + requires_rebuild = False + for key, value in inputs.items(): + if key in self._model.rebuild_parameters: + current_value = self.parameters[key].value + if value != current_value: + self.parameters[key].update(value=value) + requires_rebuild = True + + if requires_rebuild: self._model.rebuild(parameters=self.parameters) - self.x = x - y = self._model.simulate(inputs=x, t_eval=self._time_data) + y = self._model.simulate(inputs=inputs, t_eval=self._time_data) return y - def evaluateS1(self, x): + def evaluateS1(self, inputs: Inputs): """ Evaluate the model with the given parameters and return the signal and its derivatives. Parameters ---------- - x : np.ndarray - Parameter values to evaluate the model at. + inputs : Inputs + Parameters for evaluation of the model. Returns ------- tuple A tuple containing the simulation result y(t) and the sensitivities dy/dx(t) evaluated - with given inputs x. + with given inputs. """ + inputs = self.parameters.verify(inputs) + if self._model.rebuild_parameters: raise RuntimeError( "Gradient not available when using geometric parameters." 
) y, dy = self._model.simulateS1( - inputs=x, + inputs=inputs, t_eval=self._time_data, ) diff --git a/pybop/transformation/__init__.py b/pybop/transformation/__init__.py index 20c7819f..e96e9fea 100644 --- a/pybop/transformation/__init__.py +++ b/pybop/transformation/__init__.py @@ -102,6 +102,10 @@ def _verify_input(self, input: Union[List[float], float, np.ndarray]) -> None: """Set and validate the translate parameter.""" if isinstance(input, (float, int)): input = np.full(self._n_parameters, float(input)) + if isinstance(input, dict): + if len(input) != self._n_parameters: + raise ValueError(f"Translate must have {self._n_parameters} elements") + input = np.asarray([k for k in input.values()], dtype=float) elif isinstance(input, (list, np.ndarray)): if len(input) != self._n_parameters: raise ValueError(f"Translate must have {self._n_parameters} elements") diff --git a/pyproject.toml b/pyproject.toml index 6d2e1b61..99067e24 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "pybop" -version = "24.3.1" +version = "24.6" authors = [ {name = "The PyBOP Team"}, ] diff --git a/tests/integration/test_model_experiment_changes.py b/tests/integration/test_model_experiment_changes.py index 6902f873..64d27132 100644 --- a/tests/integration/test_model_experiment_changes.py +++ b/tests/integration/test_model_experiment_changes.py @@ -48,7 +48,9 @@ def test_changing_experiment(self, parameters): experiment = pybop.Experiment(["Charge at 1C until 4.1 V (2 seconds period)"]) solution_2 = model.predict( - init_soc=init_soc, experiment=experiment, inputs=parameters.true_value() + init_soc=init_soc, + experiment=experiment, + inputs=parameters.as_dict("true"), ) cost_2 = self.final_cost(solution_2, model, parameters, init_soc) diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py index 3103ef35..34a6e8c6 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -13,7 +13,7 @@ class TestOptimisation: @pytest.fixture(autouse=True) def setup(self): - self.ground_truth = np.array([0.55, 0.55]) + np.random.normal( + self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( loc=0.0, scale=0.05, size=2 ) @@ -67,7 +67,7 @@ def spm_costs(self, model, parameters, cost_class): # Define the cost to optimise problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: - return cost_class(problem, sigma=[0.03, 0.03]) + return cost_class(problem, sigma0=0.002) else: return cost_class(problem) @@ -80,7 +80,7 @@ def spm_costs(self, model, parameters, cost_class): ) @pytest.mark.integration def test_optimisation_f_guessed(self, f_guessed, spm_costs): - x0 = spm_costs.x0 + x0 = spm_costs.parameters.initial_value() # Test each optimiser optim = pybop.XNES( cost=spm_costs, @@ -109,7 +109,7 @@ def test_optimisation_f_guessed(self, f_guessed, spm_costs): np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x, init_soc): - model.parameters = parameters + model.classify_and_update_parameters(parameters) experiment = pybop.Experiment( [ ( diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 60ef4381..d7acaf7d 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -11,7 +11,8 @@ class 
Test_SPM_Parameterisation: @pytest.fixture(autouse=True) def setup(self): - self.ground_truth = np.array([0.55, 0.55]) + np.random.normal( + self.sigma0 = 0.002 + self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( loc=0.0, scale=0.05, size=2 ) @@ -25,12 +26,12 @@ def parameters(self): return pybop.Parameters( pybop.Parameter( "Negative electrode active material volume fraction", - prior=pybop.Uniform(0.4, 0.7), - bounds=[0.375, 0.725], + prior=pybop.Uniform(0.4, 0.75), + bounds=[0.375, 0.75], ), pybop.Parameter( "Positive electrode active material volume fraction", - prior=pybop.Uniform(0.4, 0.7), + prior=pybop.Uniform(0.4, 0.75), # no bounds ), ) @@ -42,6 +43,7 @@ def init_soc(self, request): @pytest.fixture( params=[ pybop.GaussianLogLikelihoodKnownSigma, + pybop.GaussianLogLikelihood, pybop.RootMeanSquaredError, pybop.SumSquaredError, pybop.MAP, @@ -62,17 +64,19 @@ def spm_costs(self, model, parameters, cost_class, init_soc): "Time [s]": solution["Time [s]"].data, "Current function [A]": solution["Current [A]"].data, "Voltage [V]": solution["Voltage [V]"].data - + self.noise(0.002, len(solution["Time [s]"].data)), + + self.noise(self.sigma0, len(solution["Time [s]"].data)), } ) # Define the cost to optimise problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: - return cost_class(problem, sigma=[0.03, 0.03]) + return cost_class(problem, sigma0=self.sigma0) + elif cost_class in [pybop.GaussianLogLikelihood]: + return cost_class(problem, sigma0=self.sigma0 * 4) # Initial sigma0 guess elif cost_class in [pybop.MAP]: return cost_class( - problem, pybop.GaussianLogLikelihoodKnownSigma, sigma=[0.03, 0.03] + problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0=self.sigma0 ) else: return cost_class(problem) @@ -83,6 +87,7 @@ def spm_costs(self, model, parameters, cost_class, init_soc): pybop.SciPyDifferentialEvolution, pybop.AdamW, pybop.CMAES, + pybop.CuckooSearch, pybop.IRPropMin, pybop.NelderMead, pybop.SNES, @@ -91,40 +96,54 @@ def spm_costs(self, model, parameters, cost_class, init_soc): ) @pytest.mark.integration def test_spm_optimisers(self, optimiser, spm_costs): - x0 = spm_costs.x0 - # Some optimisers require a complete set of bounds - if optimiser in [ - pybop.SciPyDifferentialEvolution, - ]: - spm_costs.problem.parameters[ - "Positive electrode active material volume fraction" - ].set_bounds([0.375, 0.725]) # Large range to ensure IC within bounds - bounds = spm_costs.problem.parameters.get_bounds() - spm_costs.problem.bounds = bounds - spm_costs.bounds = bounds + x0 = spm_costs.parameters.initial_value() + common_args = { + "cost": spm_costs, + "max_iterations": 250, + "absolute_tolerance": 1e-6, + } - # Test each optimiser - if optimiser in [pybop.PSO]: - optim = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.05, max_iterations=250 + # Add sigma0 to ground truth for GaussianLogLikelihood + if isinstance(spm_costs, pybop.GaussianLogLikelihood): + self.ground_truth = np.concatenate( + (self.ground_truth, np.asarray([self.sigma0])) ) - else: - optim = optimiser(cost=spm_costs, sigma0=0.05, max_iterations=250) + if isinstance(spm_costs, pybop.MAP): + for i in spm_costs.parameters.keys(): + spm_costs.parameters[i].prior = pybop.Uniform( + 0.4, 2.0 + ) # Increase range to avoid prior == np.inf + # Set sigma0 and create optimiser + sigma0 = 0.05 if isinstance(spm_costs, pybop.MAP) else None + optim = optimiser(sigma0=sigma0, **common_args) + + # Set max unchanged 
iterations for BasePintsOptimisers if issubclass(optimiser, pybop.BasePintsOptimiser): - optim.set_max_unchanged_iterations(iterations=35, absolute_tolerance=1e-5) + optim.set_max_unchanged_iterations(iterations=55) + + # AdamW will use lowest sigma0 for learning rate, so allow more iterations + if issubclass(optimiser, (pybop.AdamW, pybop.IRPropMin)) and isinstance( + spm_costs, pybop.GaussianLogLikelihood + ): + optim = optimiser(max_unchanged_iterations=75, **common_args) initial_cost = optim.cost(x0) x, final_cost = optim.run() # Assertions - if not np.allclose(x0, self.ground_truth, atol=1e-5): - if optim.minimising: - assert initial_cost > final_cost - else: - assert initial_cost < final_cost + if np.allclose(x0, self.ground_truth, atol=1e-5): + raise AssertionError("Initial guess is too close to ground truth") + + if isinstance(spm_costs, pybop.GaussianLogLikelihood): + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + np.testing.assert_allclose(x[-1], self.sigma0, atol=5e-4) else: - raise ValueError("Initial value is the same as the ground truth value.") - np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + assert ( + (initial_cost > final_cost) + if optim.minimising + else (initial_cost < final_cost) + ) + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) @pytest.fixture def spm_two_signal_cost(self, parameters, model, cost_class): @@ -136,11 +155,11 @@ def spm_two_signal_cost(self, parameters, model, cost_class): "Time [s]": solution["Time [s]"].data, "Current function [A]": solution["Current [A]"].data, "Voltage [V]": solution["Voltage [V]"].data - + self.noise(0.002, len(solution["Time [s]"].data)), + + self.noise(self.sigma0, len(solution["Time [s]"].data)), "Bulk open-circuit voltage [V]": solution[ "Bulk open-circuit voltage [V]" ].data - + self.noise(0.002, len(solution["Time [s]"].data)), + + self.noise(self.sigma0, len(solution["Time [s]"].data)), } ) @@ -151,9 +170,11 @@ def spm_two_signal_cost(self, parameters, model, cost_class): ) if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: - return cost_class(problem, sigma=[0.05, 0.05]) + return cost_class(problem, sigma0=self.sigma0) elif cost_class in [pybop.MAP]: - return cost_class(problem, pybop.GaussianLogLikelihoodKnownSigma) + return cost_class( + problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0=self.sigma0 + ) else: return cost_class(problem) @@ -167,15 +188,8 @@ def spm_two_signal_cost(self, parameters, model, cost_class): ) @pytest.mark.integration def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): - x0 = spm_two_signal_cost.x0 - # Some optimisers require a complete set of bounds - if multi_optimiser in [pybop.SciPyDifferentialEvolution]: - spm_two_signal_cost.problem.parameters[ - "Positive electrode active material volume fraction" - ].set_bounds([0.375, 0.725]) # Large range to ensure IC within bounds - bounds = spm_two_signal_cost.problem.parameters.get_bounds() - spm_two_signal_cost.problem.bounds = bounds - spm_two_signal_cost.bounds = bounds + x0 = spm_two_signal_cost.parameters.initial_value() + combined_sigma0 = np.asarray([self.sigma0, self.sigma0]) # Test each optimiser optim = multi_optimiser( @@ -183,19 +197,31 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): sigma0=0.03, max_iterations=250, ) + + # Add sigma0 to ground truth for GaussianLogLikelihood + if isinstance(spm_two_signal_cost, pybop.GaussianLogLikelihood): + self.ground_truth = np.concatenate((self.ground_truth, combined_sigma0)) + if 
issubclass(multi_optimiser, pybop.BasePintsOptimiser): optim.set_max_unchanged_iterations(iterations=35, absolute_tolerance=1e-5) - initial_cost = optim.cost(spm_two_signal_cost.x0) + initial_cost = optim.cost(optim.parameters.initial_value()) x, final_cost = optim.run() # Assertions - if not np.allclose(x0, self.ground_truth, atol=1e-5): - if optim.minimising: - assert initial_cost > final_cost - else: - assert initial_cost < final_cost - np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + if np.allclose(x0, self.ground_truth, atol=1e-5): + raise AssertionError("Initial guess is too close to ground truth") + + if isinstance(spm_two_signal_cost, pybop.GaussianLogLikelihood): + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + np.testing.assert_allclose(x[-2:], combined_sigma0, atol=5e-4) + else: + assert ( + (initial_cost > final_cost) + if optim.minimising + else (initial_cost < final_cost) + ) + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) @pytest.mark.parametrize("init_soc", [0.4, 0.6]) @pytest.mark.integration @@ -220,11 +246,11 @@ def test_model_misparameterisation(self, parameters, model, init_soc): cost = pybop.RootMeanSquaredError(problem) # Select optimiser - optimiser = pybop.CMAES + optimiser = pybop.XNES # Build the optimisation problem optim = optimiser(cost=cost) - initial_cost = optim.cost(cost.x0) + initial_cost = optim.cost(optim.x0) # Run the optimisation problem x, final_cost = optim.run() @@ -237,15 +263,14 @@ def test_model_misparameterisation(self, parameters, model, init_soc): np.testing.assert_allclose(x, self.ground_truth, atol=2e-2) def get_data(self, model, parameters, x, init_soc): - model.parameters = parameters + model.classify_and_update_parameters(parameters) experiment = pybop.Experiment( [ ( - "Discharge at 0.5C for 6 minutes (4 second period)", - "Charge at 0.5C for 6 minutes (4 second period)", + "Discharge at 0.5C for 3 minutes (4 second period)", + "Charge at 0.5C for 3 minutes (4 second period)", ), ] - * 2 ) sim = model.predict(init_soc=init_soc, experiment=experiment, inputs=x) return sim diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 37d67b62..fbb9f6cb 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -11,7 +11,7 @@ class TestTheveninParameterisation: @pytest.fixture(autouse=True) def setup(self): - self.ground_truth = np.array([0.05, 0.05]) + np.random.normal( + self.ground_truth = np.asarray([0.05, 0.05]) + np.random.normal( loc=0.0, scale=0.01, size=2 ) @@ -65,7 +65,7 @@ def cost(self, model, parameters, cost_class): ) @pytest.mark.integration def test_optimisers_on_simple_model(self, optimiser, cost): - x0 = cost.x0 + x0 = cost.parameters.initial_value() if optimiser in [pybop.GradientDescent]: optim = optimiser( cost=cost, @@ -81,7 +81,7 @@ def test_optimisers_on_simple_model(self, optimiser, cost): if isinstance(optimiser, pybop.BasePintsOptimiser): optim.set_max_unchanged_iterations(iterations=35, absolute_tolerance=1e-5) - initial_cost = optim.cost(x0) + initial_cost = optim.cost(optim.parameters.initial_value()) x, final_cost = optim.run() # Assertions @@ -95,7 +95,7 @@ def test_optimisers_on_simple_model(self, optimiser, cost): np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x): - model.parameters = parameters + model.classify_and_update_parameters(parameters) experiment = pybop.Experiment( [ ( 
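To summarise the sigma-fitting pattern these tests exercise: `GaussianLogLikelihood` now learns the noise level alongside the model parameters, appending one sigma entry per output signal to the result vector. A hedged sketch, where `problem` and the noise level are assumed:

```python
import pybop

# sigma0 is the initial guess for the unknown noise standard deviation
likelihood = pybop.GaussianLogLikelihood(problem, sigma0=0.008)
optim = pybop.CMAES(likelihood, max_iterations=250)
x, final_cost = optim.run()

# For a single-signal problem the last entry of x is the fitted sigma,
# which should approach the true noise level of the data
model_params, sigma = x[:-1], x[-1]
```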
diff --git a/tests/plotting/test_plotly_manager.py b/tests/plotting/test_plotly_manager.py index 80bc3bb5..ba0adbd8 100644 --- a/tests/plotting/test_plotly_manager.py +++ b/tests/plotting/test_plotly_manager.py @@ -95,7 +95,7 @@ def test_cancel_installation(mocker, uninstall_plotly_if_installed): with pytest.raises(SystemExit) as pytest_wrapped_e: PlotlyManager().prompt_for_plotly_installation() - assert pytest_wrapped_e.type == SystemExit + assert pytest_wrapped_e.type is SystemExit assert pytest_wrapped_e.value.code == 1 assert not is_package_installed("plotly") diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 3c0d8151..80e44bea 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -9,6 +9,11 @@ class TestCosts: Class for tests cost functions """ + # Define an invalid likelihood class for MAP tests + class InvalidLikelihood: + def __init__(self, problem, sigma0): + pass + @pytest.fixture def model(self): return pybop.lithium_ion.SPM() @@ -113,15 +118,40 @@ def test_base(self, problem): with pytest.raises(NotImplementedError): base_cost.evaluateS1([0.5]) + @pytest.mark.unit + def test_error_in_cost_calculation(self, problem): + class RaiseErrorCost(pybop.BaseCost): + def _evaluate(self, inputs, grad=None): + raise ValueError("Error test.") + + def _evaluateS1(self, inputs): + raise ValueError("Error test.") + + cost = RaiseErrorCost(problem) + with pytest.raises(ValueError, match="Error in cost calculation: Error test."): + cost([0.5]) + with pytest.raises(ValueError, match="Error in cost calculation: Error test."): + cost.evaluateS1([0.5]) + @pytest.mark.unit def test_MAP(self, problem): # Incorrect likelihood - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match="An error occurred when constructing the Likelihood class:", + ): pybop.MAP(problem, pybop.SumSquaredError) # Incorrect construction of likelihood - with pytest.raises(ValueError): - pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma, sigma="string") + with pytest.raises( + ValueError, + match="An error occurred when constructing the Likelihood class: could not convert string to float: 'string'", + ): + pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0="string") + + # Incorrect likelihood + with pytest.raises(ValueError, match="must be a subclass of BaseLikelihood"): + pybop.MAP(problem, self.InvalidLikelihood, sigma0=0.1) @pytest.mark.unit def test_costs(self, cost): @@ -158,7 +188,9 @@ def test_costs(self, cost): assert type(de) == np.ndarray # Test exception for non-numeric inputs - with pytest.raises(ValueError): + with pytest.raises( + TypeError, match="Inputs must be a dictionary or numeric." + ): cost.evaluateS1(["StringInputShouldNotWork"]) with pytest.warns(UserWarning) as record: @@ -175,7 +207,7 @@ def test_costs(self, cost): assert cost.evaluateS1([0.01]) == (np.inf, cost._de) # Test exception for non-numeric inputs - with pytest.raises(ValueError): + with pytest.raises(TypeError, match="Inputs must be a dictionary or numeric."): cost(["StringInputShouldNotWork"]) # Test treatment of simulations that terminated early @@ -224,7 +256,9 @@ def test_design_costs( assert cost([1.1]) == -np.inf # Test exception for non-numeric inputs - with pytest.raises(ValueError): + with pytest.raises( + TypeError, match="Inputs must be a dictionary or numeric." 
+ ): cost(["StringInputShouldNotWork"]) # Compute after updating nominal capacity diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index cc55ca73..56ae1e44 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -76,7 +76,6 @@ def test_base_likelihood_init(self, problem_name, n_outputs, request): assert likelihood.problem == problem assert likelihood.n_outputs == n_outputs assert likelihood.n_time_data == problem.n_time_data - assert likelihood.x0 == problem.x0 assert likelihood.n_parameters == 1 assert np.array_equal(likelihood._target, problem._target) @@ -89,21 +88,22 @@ def test_base_likelihood_call_raises_not_implemented_error( likelihood(np.array([0.5])) @pytest.mark.unit - def test_set_get_sigma(self, one_signal_problem): - likelihood = pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, 0.1) - likelihood.set_sigma(np.array([0.3])) - assert np.array_equal(likelihood.get_sigma(), np.array([0.3])) - + def test_likelihood_check_sigma0(self, one_signal_problem): with pytest.raises( ValueError, - match="The GaussianLogLikelihoodKnownSigma cost requires sigma to be " - + "either a scalar value or an array with one entry per dimension.", + match="Sigma0 must be positive", ): - pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, sigma=None) + pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, sigma0=None) likelihood = pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, 0.1) - with pytest.raises(ValueError): - likelihood.set_sigma(np.array([-0.2])) + sigma = likelihood.check_sigma0(0.2) + assert sigma == np.array(0.2) + + with pytest.raises( + ValueError, + match=r"sigma0 must be either a scalar value", + ): + pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, sigma0=[0.2, 0.3]) @pytest.mark.unit def test_base_likelihood_n_parameters_property(self, one_signal_problem): @@ -117,7 +117,7 @@ def test_base_likelihood_n_parameters_property(self, one_signal_problem): def test_gaussian_log_likelihood_known_sigma(self, problem_name, request): problem = request.getfixturevalue(problem_name) likelihood = pybop.GaussianLogLikelihoodKnownSigma( - problem, sigma=np.array([1.0]) + problem, sigma0=np.array([1.0]) ) result = likelihood(np.array([0.5])) grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5])) @@ -134,6 +134,30 @@ def test_gaussian_log_likelihood(self, one_signal_problem): np.testing.assert_allclose(result, grad_result, atol=1e-5) assert np.all(grad_likelihood <= 0) + # Test construction with sigma as a Parameter + sigma = pybop.Parameter("sigma", prior=pybop.Uniform(0.4, 0.6)) + likelihood = pybop.GaussianLogLikelihood(one_signal_problem, sigma0=sigma) + + # Test invalid sigma + with pytest.raises( + TypeError, + match=r"Expected sigma0 to contain Parameter objects or numeric values.", + ): + likelihood = pybop.GaussianLogLikelihood( + one_signal_problem, sigma0="Invalid string" + ) + + @pytest.mark.unit + def test_gaussian_log_likelihood_dsigma_scale(self, one_signal_problem): + likelihood = pybop.GaussianLogLikelihood(one_signal_problem, dsigma_scale=0.05) + assert likelihood.dsigma_scale == 0.05 + likelihood.dsigma_scale = 1e3 + assert likelihood.dsigma_scale == 1e3 + + # Test incorrect sigma scale + with pytest.raises(ValueError): + likelihood.dsigma_scale = -1e3 + @pytest.mark.unit def test_gaussian_log_likelihood_returns_negative_inf(self, one_signal_problem): likelihood = pybop.GaussianLogLikelihood(one_signal_problem) @@ -151,7 +175,7 @@ def 
test_gaussian_log_likelihood_known_sigma_returns_negative_inf( self, one_signal_problem ): likelihood = pybop.GaussianLogLikelihoodKnownSigma( - one_signal_problem, sigma=np.array([0.2]) + one_signal_problem, sigma0=np.array([0.2]) ) assert likelihood(np.array([0.01])) == -np.inf # parameter value too small assert ( diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py index 9c11b4c6..b50a14bf 100644 --- a/tests/unit/test_models.py +++ b/tests/unit/test_models.py @@ -137,10 +137,19 @@ def test_build(self, model): @pytest.mark.unit def test_rebuild(self, model): + # Test rebuild before build + with pytest.raises( + ValueError, match="Model must be built before calling rebuild" + ): + model.rebuild() + model.build() initial_built_model = model._built_model assert model._built_model is not None + model.set_params() + assert model.model_with_set_params is not None + # Test that the model can be built again model.rebuild() rebuilt_model = model._built_model @@ -252,11 +261,17 @@ def test_reinit(self): k = 0.1 y0 = 1 model = ExponentialDecay(pybamm.ParameterValues({"k": k, "y0": y0})) + + with pytest.raises( + ValueError, match="Model must be built before calling get_state" + ): + model.get_state({"k": k, "y0": y0}, 0, np.array([0])) + model.build() state = model.reinit(inputs={}) np.testing.assert_array_almost_equal(state.as_ndarray(), np.array([[y0]])) - model.parameters = pybop.Parameters(pybop.Parameter("y0")) + model.classify_and_update_parameters(pybop.Parameters(pybop.Parameter("y0"))) state = model.reinit(inputs=[1]) np.testing.assert_array_almost_equal(state.as_ndarray(), np.array([[y0]])) @@ -296,6 +311,9 @@ def test_basemodel(self): with pytest.raises(NotImplementedError): base.approximate_capacity(x) + base.classify_and_update_parameters(parameters=None) + assert base._n_parameters == 0 + @pytest.mark.unit def test_thevenin_model(self): parameter_set = pybop.ParameterSet( @@ -317,7 +335,7 @@ def test_check_params(self): assert base.check_params() assert base.check_params(inputs={"a": 1}) assert base.check_params(inputs=[1]) - with pytest.raises(ValueError, match="Expecting inputs in the form of"): + with pytest.raises(TypeError, match="Inputs must be a dictionary or numeric."): base.check_params(inputs=["unexpected_string"]) @pytest.mark.unit diff --git a/tests/unit/test_observer_unscented_kalman.py b/tests/unit/test_observer_unscented_kalman.py index 2a947e71..ce60abbc 100644 --- a/tests/unit/test_observer_unscented_kalman.py +++ b/tests/unit/test_observer_unscented_kalman.py @@ -14,15 +14,6 @@ class TestUKF: measure_noise = 1e-4 - @pytest.fixture(params=[1, 2, 3]) - def model(self, request): - model = ExponentialDecay( - parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), - n_states=request.param, - ) - model.build() - return model - @pytest.fixture def parameters(self): return pybop.Parameters( @@ -40,6 +31,15 @@ def parameters(self): ), ) + @pytest.fixture(params=[1, 2, 3]) + def model(self, parameters, request): + model = ExponentialDecay( + parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), + n_states=request.param, + ) + model.build(parameters=parameters) + return model + @pytest.fixture def dataset(self, model: pybop.BaseModel, parameters): observer = pybop.Observer(parameters, model, signal=["2y"]) diff --git a/tests/unit/test_observers.py b/tests/unit/test_observers.py index 46987bae..2d2e3bc6 100644 --- a/tests/unit/test_observers.py +++ b/tests/unit/test_observers.py @@ -11,15 +11,6 @@ class TestObserver: A class to 
test the observer class. """ - @pytest.fixture(params=[1, 2]) - def model(self, request): - model = ExponentialDecay( - parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), - n_states=request.param, - ) - model.build() - return model - @pytest.fixture def parameters(self): return pybop.Parameters( @@ -37,6 +28,15 @@ def parameters(self): ), ) + @pytest.fixture(params=[1, 2]) + def model(self, parameters, request): + model = ExponentialDecay( + parameter_set=pybamm.ParameterValues({"k": "[input]", "y0": "[input]"}), + n_states=request.param, + ) + model.build(parameters=parameters) + return model + @pytest.mark.unit def test_observer(self, model, parameters): n = model.n_states @@ -72,8 +72,8 @@ def test_observer(self, model, parameters): # Test evaluate with different inputs observer._time_data = t_eval - observer.evaluate(parameters.initial_value()) - observer.evaluate(parameters) + observer.evaluate(parameters.as_dict()) + observer.evaluate(parameters.current_value()) # Test evaluate with dataset observer._dataset = pybop.Dataset( @@ -83,7 +83,7 @@ def test_observer(self, model, parameters): } ) observer._target = {"2y": expected} - observer.evaluate(parameters.initial_value()) + observer.evaluate(parameters.as_dict()) @pytest.mark.unit def test_unbuilt_model(self, parameters): diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 97fe12fc..8444159b 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -75,6 +75,7 @@ def two_param_cost(self, model, two_parameters, dataset): (pybop.Adam, "Adam"), (pybop.AdamW, "AdamW"), (pybop.CMAES, "Covariance Matrix Adaptation Evolution Strategy (CMA-ES)"), + (pybop.CuckooSearch, "Cuckoo Search"), (pybop.SNES, "Seperable Natural Evolution Strategy (SNES)"), (pybop.XNES, "Exponential Natural Evolution Strategy (xNES)"), (pybop.PSO, "Particle Swarm Optimisation (PSO)"), @@ -104,6 +105,15 @@ def test_optimiser_classes(self, two_param_cost, optimiser, expected_name): if issubclass(optimiser, pybop.BasePintsOptimiser): assert optim._boundaries is None + @pytest.mark.unit + def test_no_optimisation_parameters(self, model, dataset): + problem = pybop.FittingProblem( + model=model, parameters=pybop.Parameters(), dataset=dataset + ) + cost = pybop.RootMeanSquaredError(problem) + with pytest.raises(ValueError, match="There are no parameters to optimise."): + pybop.Optimisation(cost=cost) + @pytest.mark.parametrize( "optimiser", [ @@ -117,6 +127,7 @@ def test_optimiser_classes(self, two_param_cost, optimiser, expected_name): pybop.PSO, pybop.IRPropMin, pybop.NelderMead, + pybop.CuckooSearch, ], ) @pytest.mark.unit @@ -166,6 +177,7 @@ def test_optimiser_kwargs(self, cost, optimiser): ): warnings.simplefilter("always") optim = optimiser(cost=cost, unrecognised=10) + assert not optim.pints_optimiser.running() else: # Check bounds in list format and update tol bounds = [ @@ -240,18 +252,24 @@ def test_optimiser_kwargs(self, cost, optimiser): # Check defaults assert optim.pints_optimiser.n_hyper_parameters() == 5 - assert not optim.pints_optimiser.running() assert optim.pints_optimiser.x_guessed() == optim.pints_optimiser._x0 with pytest.raises(Exception): optim.pints_optimiser.tell([0.1]) else: # Check and update initial values - assert optim.x0 == cost.x0 + x0 = cost.parameters.initial_value() + assert optim.x0 == x0 x0_new = np.array([0.6]) optim = optimiser(cost=cost, x0=x0_new) assert optim.x0 == x0_new - assert optim.x0 != cost.x0 + assert optim.x0 != x0 + + @pytest.mark.unit + 
def test_cuckoo_no_bounds(self, dataset, cost, model): + optim = pybop.CuckooSearch(cost=cost, bounds=None, max_iterations=1) + optim.run() + assert optim.pints_optimiser._boundaries is None @pytest.mark.unit def test_scipy_minimize_with_jac(self, cost): @@ -266,6 +284,16 @@ def test_scipy_minimize_with_jac(self, cost): ): optim = pybop.SciPyMinimize(cost=cost, jac="Invalid string") + @pytest.mark.unit + def test_scipy_minimize_invalid_x0(self, cost): + # Check a starting point that returns an infinite cost + invalid_x0 = np.array([1.1]) + optim = pybop.SciPyMinimize( + cost=cost, x0=invalid_x0, maxiter=10, allow_infeasible_solutions=False + ) + optim.run() + assert abs(optim._cost0) != np.inf + @pytest.mark.unit def test_single_parameter(self, cost): # Test catch for optimisers that can only run with multiple parameters @@ -322,13 +350,6 @@ class RandomClass: with pytest.raises(ValueError): pybop.Optimisation(cost=cost, optimiser=RandomClass) - @pytest.mark.unit - def test_prior_sampling(self, cost): - # Tests prior sampling - for i in range(50): - optim = pybop.Optimisation(cost=cost) - assert optim.x0[0] < 0.62 and optim.x0[0] > 0.58 - @pytest.mark.unit @pytest.mark.parametrize( "mean, sigma, expect_exception", @@ -403,6 +424,7 @@ def test_halting(self, cost): optim = pybop.Optimisation(cost=cost) # Trigger threshold + optim.set_threshold(None) optim.set_threshold(np.inf) optim.run() optim.set_max_unchanged_iterations() diff --git a/tests/unit/test_parameter_sets.py b/tests/unit/test_parameter_sets.py index 12b2c18a..347dfde9 100644 --- a/tests/unit/test_parameter_sets.py +++ b/tests/unit/test_parameter_sets.py @@ -123,3 +123,16 @@ def test_bpx_parameter_sets(self): match="Parameter set already constructed, or path to bpx file not provided.", ): bpx_parameters.import_from_bpx() + + @pytest.mark.unit + def test_set_formation_concentrations(self): + parameter_set = pybop.ParameterSet.pybamm( + "Chen2020", formation_concentrations=True + ) + + assert ( + parameter_set["Initial concentration in negative electrode [mol.m-3]"] == 0 + ) + assert ( + parameter_set["Initial concentration in positive electrode [mol.m-3]"] > 0 + ) diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 736684fe..ebfccea1 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -78,6 +78,16 @@ def test_invalid_inputs(self, parameter): ): pybop.Parameter("Name", bounds=[0.7, 0.3]) + @pytest.mark.unit + def test_sample_initial_values(self): + parameter = pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.6, 0.02), + bounds=[0.375, 0.7], + ) + sample = parameter.get_initial_value() + assert (sample >= 0.375) and (sample <= 0.7) + class TestParameters: """ @@ -105,6 +115,18 @@ def test_parameters_construction(self, parameter): assert parameter.name in params.param.keys() assert parameter in params.param.values() + params.join( + pybop.Parameters( + parameter, + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.6, 0.02), + bounds=[0.375, 0.7], + initial_value=0.6, + ), + ) + ) + with pytest.raises( ValueError, match="There is already a parameter with the name " @@ -128,6 +150,11 @@ def test_parameters_construction(self, parameter): initial_value=0.6, ) ) + with pytest.raises( + Exception, + match="Parameter requires a name.", + ): + params.add(dict(value=1)) with pytest.raises( ValueError, match="There is already a parameter with the name " @@ -156,6 +183,28 @@ def 
test_parameters_construction(self, parameter): ): params.remove(parameter_name=parameter) + @pytest.mark.unit + def test_parameters_naming(self, parameter): + params = pybop.Parameters(parameter) + param = params["Negative electrode active material volume fraction"] + assert param == parameter + + with pytest.raises( + ValueError, + match="is not the name of a parameter.", + ): + params["Positive electrode active material volume fraction"] + + @pytest.mark.unit + def test_parameters_update(self, parameter): + params = pybop.Parameters(parameter) + params.update(values=[0.5]) + assert parameter.value == 0.5 + params.update(bounds=[[0.38, 0.68]]) + assert parameter.bounds == [0.38, 0.68] + params.update(bounds=dict(lower=[0.37], upper=[0.7])) + assert parameter.bounds == [0.37, 0.7] + @pytest.mark.unit def test_get_sigma(self, parameter): params = pybop.Parameters(parameter) diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py index b810e3f0..4c7e14d4 100644 --- a/tests/unit/test_plots.py +++ b/tests/unit/test_plots.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import pytest from packaging import version @@ -88,6 +90,9 @@ def test_problem_plots(self, fitting_problem, design_problem): pybop.quick_plot(fitting_problem, title="Optimised Comparison") pybop.quick_plot(design_problem) + # Test conversion of values into inputs + pybop.quick_plot(fitting_problem, problem_inputs=[0.6, 0.6]) + @pytest.fixture def cost(self, fitting_problem): # Define an example cost @@ -141,3 +146,74 @@ def test_with_ipykernel(self, dataset, cost, optim): pybop.plot_convergence(optim) pybop.plot_parameters(optim) pybop.plot2d(optim, steps=5) + + @pytest.mark.unit + def test_gaussianloglikelihood_plots(self, fitting_problem): + # Test plotting of GaussianLogLikelihood + likelihood = pybop.GaussianLogLikelihood(fitting_problem) + optim = pybop.CMAES(likelihood, max_iterations=5) + optim.run() + + # Plot parameters + pybop.plot_parameters(optim) + + @pytest.mark.unit + def test_plot2d_incorrect_number_of_parameters(self, model, dataset): + # Test with fewer than two parameters + parameters = pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.68, 0.05), + bounds=[0.5, 0.8], + ), + ) + fitting_problem = pybop.FittingProblem(model, parameters, dataset) + cost = pybop.SumSquaredError(fitting_problem) + with pytest.raises( + ValueError, match="This cost function takes fewer than 2 parameters." + ): + pybop.plot2d(cost)
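The `update()` overloads exercised in `test_parameters_update` above, restated as a standalone sketch (editor's illustration, not part of the patch):

```python
import pybop

params = pybop.Parameters(
    pybop.Parameter("Negative electrode active material volume fraction")
)
params.update(values=[0.5])                            # set current values
params.update(bounds=[[0.38, 0.68]])                   # bounds as list of pairs
params.update(bounds=dict(lower=[0.37], upper=[0.7]))  # bounds as lower/upper dict
```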
+ + # Test with more than two parameters + parameters = pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.68, 0.05), + bounds=[0.5, 0.8], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.58, 0.05), + bounds=[0.4, 0.7], + ), + pybop.Parameter( + "Positive particle radius [m]", + prior=pybop.Gaussian(4.8e-06, 0.05e-06), + bounds=[4e-06, 6e-06], + ), + ) + fitting_problem = pybop.FittingProblem(model, parameters, dataset) + cost = pybop.SumSquaredError(fitting_problem) + pybop.plot2d(cost) + + @pytest.mark.unit + def test_plot2d_prior_bounds(self, model, dataset): + # Test with prior bounds + parameters = pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.68, 0.01), + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.58, 0.01), + ), + ) + fitting_problem = pybop.FittingProblem(model, parameters, dataset) + cost = pybop.SumSquaredError(fitting_problem) + with pytest.warns( + UserWarning, + match="Bounds were created from prior distributions.", + ): + warnings.simplefilter("always") + pybop.plot2d(cost) diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py index 02669201..2d5727b6 100644 --- a/tests/unit/test_standalone.py +++ b/tests/unit/test_standalone.py @@ -35,7 +35,8 @@ def test_optimisation_on_standalone_cost(self): optim = pybop.SciPyDifferentialEvolution(cost=cost) x, final_cost = optim.run() - initial_cost = optim.cost(cost.x0) + optim.x0 = optim.log["x"][0][0] + initial_cost = optim.cost(optim.x0) assert initial_cost > final_cost np.testing.assert_allclose(final_cost, 42, atol=1e-1)
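Finally, putting the new `formation_concentrations` option together, a usage sketch mirroring `test_set_formation_concentrations` above:

```python
import pybop

# Re-balance the lithium inventory when loading a PyBaMM parameter set:
# all cyclable lithium is assigned to the positive electrode
parameter_set = pybop.ParameterSet.pybamm(
    "Chen2020", formation_concentrations=True
)

assert parameter_set["Initial concentration in negative electrode [mol.m-3]"] == 0
assert parameter_set["Initial concentration in positive electrode [mol.m-3]"] > 0
```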