diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml index 63771150..704c7975 100644 --- a/.github/workflows/periodic_benchmarks.yaml +++ b/.github/workflows/periodic_benchmarks.yaml @@ -22,6 +22,11 @@ jobs: runs-on: [self-hosted, macOS, ARM64] if: github.repository == 'pybop-team/PyBOP' steps: + - name: Cleanup build folder + run: | + rm -rf ./* || true + rm -rf ./.??* || true + - uses: actions/checkout@v4 - name: Install python & create virtualenv diff --git a/.github/workflows/scheduled_tests.yaml b/.github/workflows/scheduled_tests.yaml index ade88188..159152f0 100644 --- a/.github/workflows/scheduled_tests.yaml +++ b/.github/workflows/scheduled_tests.yaml @@ -113,6 +113,11 @@ jobs: matrix: ${{fromJson(needs.filter_pybamm_matrix.outputs.filtered_pybop_matrix)}} steps: + - name: Cleanup build folder + run: | + rm -rf ./* || true + rm -rf ./.??* || true + - uses: actions/checkout@v4 - name: Install python & create virtualenv shell: bash diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 401b5338..47ef467c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ ci: repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.4.8" + rev: "v0.5.0" hooks: - id: ruff args: [--fix, --show-fixes] diff --git a/CHANGELOG.md b/CHANGELOG.md index e256e93e..93752e1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,8 @@ ## Features - +- [#379](https://github.com/pybop-team/PyBOP/pull/379) - Adds model.simulateS1 to weekly benchmarks. +- [#174](https://github.com/pybop-team/PyBOP/issues/174) - Adds new logo and updates Readme for accessibility. - [#316](https://github.com/pybop-team/PyBOP/pull/316) - Adds Adam with weight decay (AdamW) optimiser, adds deprecation warning for pints.Adam implementation. - [#271](https://github.com/pybop-team/PyBOP/issues/271) - Aligns the output of the optimisers via a generalisation of Result class. - [#315](https://github.com/pybop-team/PyBOP/pull/315) - Updates __init__ structure to remove circular import issues and minimises dependency imports across codebase for faster PyBOP module import. Adds type-hints to BaseModel and refactors rebuild parameter variables. @@ -24,6 +25,8 @@ ## Bug Fixes +- [#372](https://github.com/pybop-team/PyBOP/pull/372) - Converts `np.array` to `np.asarray` for Numpy v2.0 support. +- [#165](https://github.com/pybop-team/PyBOP/issues/165) - Stores the attempted and best parameter values and the best cost for each iteration in the log attribute of the optimiser and updates the associated plots. - [#354](https://github.com/pybop-team/PyBOP/issues/354) - Fixes the calculation of the gradient in the `RootMeanSquaredError` cost. - [#347](https://github.com/pybop-team/PyBOP/issues/347) - Resets options between MSMR tests to cope with a bug in PyBaMM v23.9 which is fixed in PyBaMM v24.1. - [#337](https://github.com/pybop-team/PyBOP/issues/337) - Restores benchmarks, relaxes CI schedule for benchmarks and scheduled tests. @@ -36,6 +39,15 @@ - [#270](https://github.com/pybop-team/PyBOP/pull/270) - Updates PR template. - [#91](https://github.com/pybop-team/PyBOP/issues/91) - Adds a check on the number of parameters for CMAES and makes XNES the default optimiser. +# [v24.3.1](https://github.com/pybop-team/PyBOP/tree/v24.3.1) - 2024-06-17 + +## Features + + +## Bug Fixes + +- [#369](https://github.com/pybop-team/PyBOP/pull/369) - Upper pins Numpy < 2.0 due to Numpy v2.0 breaking Pints' functionality.
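For the two Numpy-related entries above (#372 and #369), a minimal sketch, not part of the diff, of why `np.asarray` replaces `np.array`; the values are illustrative only.

# Illustrative only: np.array copies its input by default, while
# np.asarray returns the input unchanged when it is already a suitable
# ndarray. NumPy v2.0 also changed np.array(x, copy=False) to raise
# when a copy is unavoidable, so np.asarray is the portable spelling.
import numpy as np

x = np.asarray([0.55, 0.55])
assert np.asarray(x) is x    # no copy made
assert np.array(x) is not x  # always a fresh copy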
+ # [v24.3](https://github.com/pybop-team/PyBOP/tree/v24.3) - 2024-03-25 ## Features diff --git a/CITATION.cff b/CITATION.cff index 73dcce74..a14af062 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -11,5 +11,5 @@ authors: family-names: Courtier - given-names: David family-names: Howey -version: "24.3" # Update this when you release a new version +version: "24.3.1" # Update this when you release a new version repository-code: 'https://www.github.com/pybop-team/pybop' diff --git a/README.md b/README.md index 8fd09c0a..99f7a031 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@
- ![logo](https://raw.githubusercontent.com/pybop-team/PyBOP/develop/assets/Temp_Logo.png) + [image: logo.svg] + # Python Battery Optimisation and Parameterisation @@ -25,8 +26,7 @@ PyBOP provides a complete set of tools for parameterisation and optimisation of The diagram below shows the conceptual framework of PyBOP. This package is currently under development, so users can expect the API to evolve with future releases.

- - [image: pybop_arch.svg] + [image: pybop_arch.svg]

## Installation diff --git a/assets/PyBOP-high-level.svg b/assets/PyBOP-high-level.svg index 00f3428f..25de26fd 100644 --- a/assets/PyBOP-high-level.svg +++ b/assets/PyBOP-high-level.svg @@ -1,2906 +1,4149 @@ [Reworked architecture diagram SVG; full markup omitted. Recoverable text: panels "Parameter Identification" and "Design Optimisation" linking "Experimental Data", "Physics and Empirical Models", forward models/simulations and inverse modelling/parameters, plus a design plot of volumetric energy density [Wh.m-3] vs. negative and positive electrode thickness [m] marking a transport-limited region and a "Goldilocks region".] diff --git a/assets/Temp_Logo.png b/assets/Temp_Logo.png deleted file mode 100644 index 4ef2853b..00000000 Binary files a/assets/Temp_Logo.png and /dev/null differ diff --git a/assets/logo/PyBOP_logo_flat.png b/assets/logo/PyBOP_logo_flat.png new file mode 100644 index 00000000..8e0fb139 Binary files /dev/null and b/assets/logo/PyBOP_logo_flat.png differ diff --git a/assets/logo/PyBOP_logo_flat.svg b/assets/logo/PyBOP_logo_flat.svg new file mode 100644 index 00000000..64fdafe1 --- /dev/null +++ b/assets/logo/PyBOP_logo_flat.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_flat_inverse.png b/assets/logo/PyBOP_logo_flat_inverse.png new file mode 100644 index 00000000..f8aa5817 Binary files /dev/null and b/assets/logo/PyBOP_logo_flat_inverse.png differ diff --git a/assets/logo/PyBOP_logo_flat_inverse.svg b/assets/logo/PyBOP_logo_flat_inverse.svg new file mode 100644 index 00000000..d8c3e651 --- /dev/null +++ b/assets/logo/PyBOP_logo_flat_inverse.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_inverse.png b/assets/logo/PyBOP_logo_inverse.png new file mode 100644 index 00000000..62a4cf7d Binary files /dev/null and b/assets/logo/PyBOP_logo_inverse.png differ diff --git a/assets/logo/PyBOP_logo_inverse.svg b/assets/logo/PyBOP_logo_inverse.svg new file mode 100644 index 00000000..e4ced905 --- /dev/null +++ b/assets/logo/PyBOP_logo_inverse.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark.png b/assets/logo/PyBOP_logo_mark.png new file mode 100644 index 00000000..6fce2b94 Binary files /dev/null and b/assets/logo/PyBOP_logo_mark.png differ diff --git a/assets/logo/PyBOP_logo_mark.svg b/assets/logo/PyBOP_logo_mark.svg new file mode 100644 index 00000000..559ef406 --- /dev/null +++ b/assets/logo/PyBOP_logo_mark.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark_circle.png b/assets/logo/PyBOP_logo_mark_circle.png new file mode 100644 index 00000000..adc77e6b Binary files /dev/null and b/assets/logo/PyBOP_logo_mark_circle.png differ diff --git a/assets/logo/PyBOP_logo_mark_circle.svg b/assets/logo/PyBOP_logo_mark_circle.svg new file mode 100644 index 00000000..1f4ba833 --- /dev/null +++ b/assets/logo/PyBOP_logo_mark_circle.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark_mono.png b/assets/logo/PyBOP_logo_mark_mono.png new file mode 100644 index 00000000..bec070d0 Binary files /dev/null and b/assets/logo/PyBOP_logo_mark_mono.png differ diff --git a/assets/logo/PyBOP_logo_mark_mono.svg b/assets/logo/PyBOP_logo_mark_mono.svg new file mode 100644 index 00000000..204f6e2f ---
/dev/null +++ b/assets/logo/PyBOP_logo_mark_mono.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mark_mono_inverse.png b/assets/logo/PyBOP_logo_mark_mono_inverse.png new file mode 100644 index 00000000..3d4cc15f Binary files /dev/null and b/assets/logo/PyBOP_logo_mark_mono_inverse.png differ diff --git a/assets/logo/PyBOP_logo_mark_mono_inverse.svg b/assets/logo/PyBOP_logo_mark_mono_inverse.svg new file mode 100644 index 00000000..ab859856 --- /dev/null +++ b/assets/logo/PyBOP_logo_mark_mono_inverse.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mono.png b/assets/logo/PyBOP_logo_mono.png new file mode 100644 index 00000000..af9be830 Binary files /dev/null and b/assets/logo/PyBOP_logo_mono.png differ diff --git a/assets/logo/PyBOP_logo_mono.svg b/assets/logo/PyBOP_logo_mono.svg new file mode 100644 index 00000000..b135967e --- /dev/null +++ b/assets/logo/PyBOP_logo_mono.svg @@ -0,0 +1 @@ + diff --git a/assets/logo/PyBOP_logo_mono_inverse.png b/assets/logo/PyBOP_logo_mono_inverse.png new file mode 100644 index 00000000..cc9030c6 Binary files /dev/null and b/assets/logo/PyBOP_logo_mono_inverse.png differ diff --git a/assets/logo/PyBOP_logo_mono_inverse.svg b/assets/logo/PyBOP_logo_mono_inverse.svg new file mode 100644 index 00000000..830bea1a --- /dev/null +++ b/assets/logo/PyBOP_logo_mono_inverse.svg @@ -0,0 +1 @@ + diff --git a/benchmarks/benchmark_model.py b/benchmarks/benchmark_model.py index df0335c2..843b03bc 100644 --- a/benchmarks/benchmark_model.py +++ b/benchmarks/benchmark_model.py @@ -81,3 +81,13 @@ def time_model_simulate(self, model, parameter_set): parameter_set (str): The name of the parameter set being used. """ self.problem._model.simulate(inputs=self.inputs, t_eval=self.t_eval) + + def time_model_simulateS1(self, model, parameter_set): + """ + Benchmark the simulateS1 method of the model. + + Args: + model (pybop.Model): The model class being benchmarked. + parameter_set (str): The name of the parameter set being used. 
+ """ + self.problem._model.simulateS1(inputs=self.inputs, t_eval=self.t_eval) diff --git a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb index 6e2d698d..9fd084dd 100644 --- a/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb +++ b/examples/notebooks/LG_M50_ECM/1-single-pulse-circuit-model.ipynb @@ -1679,7 +1679,7 @@ } ], "source": [ - "pybop.quick_plot(problem, inputs=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { @@ -1850,7 +1850,7 @@ } ], "source": [ - "pybop.quick_plot(problem, inputs=x, title=\"Parameter Extrapolation\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Parameter Extrapolation\");" ] }, { diff --git a/examples/notebooks/equivalent_circuit_identification.ipynb b/examples/notebooks/equivalent_circuit_identification.ipynb index 3f5f550e..6184c191 100644 --- a/examples/notebooks/equivalent_circuit_identification.ipynb +++ b/examples/notebooks/equivalent_circuit_identification.ipynb @@ -457,7 +457,7 @@ } ], "source": [ - "pybop.quick_plot(problem, inputs=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { diff --git a/examples/notebooks/multi_model_identification.ipynb b/examples/notebooks/multi_model_identification.ipynb index b15e6a26..a66a78f2 100644 --- a/examples/notebooks/multi_model_identification.ipynb +++ b/examples/notebooks/multi_model_identification.ipynb @@ -3904,7 +3904,9 @@ ], "source": [ "for optim, x in zip(optims, xs):\n", - " pybop.quick_plot(optim.cost.problem, inputs=x, title=optim.cost.problem.model.name)" + " pybop.quick_plot(\n", + " optim.cost.problem, problem_inputs=x, title=optim.cost.problem.model.name\n", + " )" ] }, { @@ -3956,7 +3958,7 @@ } ], "source": [ - "bounds = np.array([[5.5e-05, 8e-05], [7.5e-05, 9e-05]])\n", + "bounds = np.asarray([[5.5e-05, 8e-05], [7.5e-05, 9e-05]])\n", "for optim in optims:\n", " pybop.plot2d(optim, bounds=bounds, steps=10, title=optim.cost.problem.model.name)" ] diff --git a/examples/notebooks/multi_optimiser_identification.ipynb b/examples/notebooks/multi_optimiser_identification.ipynb index 3ee6e6ad..1422985d 100644 --- a/examples/notebooks/multi_optimiser_identification.ipynb +++ b/examples/notebooks/multi_optimiser_identification.ipynb @@ -599,7 +599,7 @@ ], "source": [ "for optim, x in zip(optims, xs):\n", - " pybop.quick_plot(optim.cost.problem, inputs=x, title=optim.name())" + " pybop.quick_plot(optim.cost.problem, problem_inputs=x, title=optim.name())" ] }, { @@ -925,7 +925,7 @@ ], "source": [ "# Plot the cost landscape with optimisation path and updated bounds\n", - "bounds = np.array([[0.5, 0.8], [0.55, 0.8]])\n", + "bounds = np.asarray([[0.5, 0.8], [0.55, 0.8]])\n", "for optim in optims:\n", " pybop.plot2d(optim, bounds=bounds, steps=10, title=optim.name())" ] diff --git a/examples/notebooks/optimiser_calibration.ipynb b/examples/notebooks/optimiser_calibration.ipynb index 20d2feca..ec4c1551 100644 --- a/examples/notebooks/optimiser_calibration.ipynb +++ b/examples/notebooks/optimiser_calibration.ipynb @@ -404,7 +404,7 @@ } ], "source": [ - "pybop.quick_plot(problem, inputs=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { @@ -677,7 +677,7 @@ ], "source": [ "# Plot the cost landscape with optimisation path and updated bounds\n", - "bounds = np.array([[0.6, 0.9], [0.5, 
0.8]])\n", + "bounds = np.asarray([[0.6, 0.9], [0.5, 0.8]])\n", "for optim, sigma in zip(optims, sigmas):\n", " pybop.plot2d(optim, bounds=bounds, steps=10, title=f\"Sigma: {sigma}\")" ] @@ -723,7 +723,7 @@ "source": [ "optim = pybop.GradientDescent(cost, sigma0=0.0115)\n", "x, final_cost = optim.run()\n", - "pybop.quick_plot(problem, inputs=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { diff --git a/examples/notebooks/pouch_cell_identification.ipynb b/examples/notebooks/pouch_cell_identification.ipynb index 444f36f7..d952e22c 100644 --- a/examples/notebooks/pouch_cell_identification.ipynb +++ b/examples/notebooks/pouch_cell_identification.ipynb @@ -517,7 +517,7 @@ } ], "source": [ - "pybop.quick_plot(problem, inputs=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { diff --git a/examples/notebooks/spm_AdamW.ipynb b/examples/notebooks/spm_AdamW.ipynb index 7796c832..ec9a961a 100644 --- a/examples/notebooks/spm_AdamW.ipynb +++ b/examples/notebooks/spm_AdamW.ipynb @@ -437,7 +437,7 @@ } ], "source": [ - "pybop.quick_plot(problem, inputs=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { @@ -530,7 +530,7 @@ "# Plot the cost landscape\n", "pybop.plot2d(cost, steps=15)\n", "# Plot the cost landscape with optimisation path and updated bounds\n", - "bounds = np.array([[0.6, 0.9], [0.5, 0.8]])\n", + "bounds = np.asarray([[0.6, 0.9], [0.5, 0.8]])\n", "pybop.plot2d(optim, bounds=bounds, steps=15);" ] }, diff --git a/examples/notebooks/spm_electrode_design.ipynb b/examples/notebooks/spm_electrode_design.ipynb index 3cd47b1e..e1fd5820 100644 --- a/examples/notebooks/spm_electrode_design.ipynb +++ b/examples/notebooks/spm_electrode_design.ipynb @@ -329,7 +329,7 @@ "source": [ "if cost.update_capacity:\n", " problem._model.approximate_capacity(x)\n", - "pybop.quick_plot(problem, inputs=x, title=\"Optimised Comparison\");" + "pybop.quick_plot(problem, problem_inputs=x, title=\"Optimised Comparison\");" ] }, { diff --git a/examples/scripts/BPX_spm.py b/examples/scripts/BPX_spm.py index 7a1881c4..eea65884 100644 --- a/examples/scripts/BPX_spm.py +++ b/examples/scripts/BPX_spm.py @@ -51,7 +51,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/ecm_CMAES.py b/examples/scripts/ecm_CMAES.py index 2074a457..953d7e6a 100644 --- a/examples/scripts/ecm_CMAES.py +++ b/examples/scripts/ecm_CMAES.py @@ -89,7 +89,7 @@ pybop.plot_dataset(dataset) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) @@ -101,5 +101,5 @@ pybop.plot2d(cost, steps=15) # Plot the cost landscape with optimisation path and updated bounds -bounds = np.array([[1e-4, 1e-2], [1e-5, 1e-2]]) +bounds = np.asarray([[1e-4, 1e-2], [1e-5, 1e-2]]) pybop.plot2d(optim, bounds=bounds, steps=15) diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py index 65799322..7875d03c 100644 --- a/examples/scripts/exp_UKF.py +++ b/examples/scripts/exp_UKF.py @@ -103,7 +103,7 @@ print("Estimated parameters:", x) # Plot the timeseries 
output (requires model that returns Voltage) -pybop.quick_plot(observer, inputs=x, title="Optimised Comparison") +pybop.quick_plot(observer, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/gitt.py b/examples/scripts/gitt.py index 2320995a..6d3b4a94 100644 --- a/examples/scripts/gitt.py +++ b/examples/scripts/gitt.py @@ -59,7 +59,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/spm_AdamW.py b/examples/scripts/spm_AdamW.py index 66220978..796849be 100644 --- a/examples/scripts/spm_AdamW.py +++ b/examples/scripts/spm_AdamW.py @@ -68,7 +68,7 @@ def noise(sigma): print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) @@ -77,5 +77,5 @@ def noise(sigma): pybop.plot_parameters(optim) # Plot the cost landscape with optimisation path -bounds = np.array([[0.5, 0.8], [0.4, 0.7]]) +bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]]) pybop.plot2d(optim, bounds=bounds, steps=15) diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index 7e74e7a9..ed38144a 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -53,7 +53,7 @@ pybop.plot_dataset(dataset) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/spm_IRPropMin.py b/examples/scripts/spm_IRPropMin.py index fef39546..1969f6f9 100644 --- a/examples/scripts/spm_IRPropMin.py +++ b/examples/scripts/spm_IRPropMin.py @@ -42,7 +42,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) @@ -51,5 +51,5 @@ pybop.plot_parameters(optim) # Plot the cost landscape with optimisation path -bounds = np.array([[0.5, 0.8], [0.4, 0.7]]) +bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]]) pybop.plot2d(optim, bounds=bounds, steps=15) diff --git a/examples/scripts/spm_MAP.py b/examples/scripts/spm_MAP.py index 58304fa2..dc135fdc 100644 --- a/examples/scripts/spm_MAP.py +++ b/examples/scripts/spm_MAP.py @@ -57,7 +57,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x[0:2], title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x[0:2], title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) @@ -69,5 +69,5 @@ pybop.plot2d(cost, steps=15) # Plot the cost landscape with optimisation path -bounds = np.array([[0.55, 0.77], [0.48, 0.68]]) +bounds = np.asarray([[0.55, 0.77], [0.48, 0.68]]) pybop.plot2d(optim, bounds=bounds, steps=15) diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py index 7532ee29..d5d6e641 100644 --- a/examples/scripts/spm_MLE.py +++ b/examples/scripts/spm_MLE.py @@ -57,7 +57,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x[0:2], title="Optimised Comparison") 
+pybop.quick_plot(problem, problem_inputs=x[0:2], title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) @@ -69,5 +69,5 @@ pybop.plot2d(likelihood, steps=15) # Plot the cost landscape with optimisation path -bounds = np.array([[0.55, 0.77], [0.48, 0.68]]) +bounds = np.asarray([[0.55, 0.77], [0.48, 0.68]]) pybop.plot2d(optim, bounds=bounds, steps=15) diff --git a/examples/scripts/spm_NelderMead.py b/examples/scripts/spm_NelderMead.py index 3d938e6e..e07801e0 100644 --- a/examples/scripts/spm_NelderMead.py +++ b/examples/scripts/spm_NelderMead.py @@ -68,7 +68,7 @@ def noise(sigma): print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) @@ -77,5 +77,5 @@ def noise(sigma): pybop.plot_parameters(optim) # Plot the cost landscape with optimisation path -bounds = np.array([[0.5, 0.8], [0.4, 0.7]]) +bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]]) pybop.plot2d(optim, bounds=bounds, steps=15) diff --git a/examples/scripts/spm_SNES.py b/examples/scripts/spm_SNES.py index 3f737203..93046d63 100644 --- a/examples/scripts/spm_SNES.py +++ b/examples/scripts/spm_SNES.py @@ -42,7 +42,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/spm_UKF.py b/examples/scripts/spm_UKF.py index 09adb4e7..e528c715 100644 --- a/examples/scripts/spm_UKF.py +++ b/examples/scripts/spm_UKF.py @@ -68,7 +68,7 @@ print("Estimated parameters:", x) # Plot the timeseries output (requires model that returns Voltage) -pybop.quick_plot(observer, inputs=x, title="Optimised Comparison") +pybop.quick_plot(observer, problem_inputs=x, title="Optimised Comparison") # # Plot convergence # pybop.plot_convergence(optim) diff --git a/examples/scripts/spm_XNES.py b/examples/scripts/spm_XNES.py index c7b9e75c..40900640 100644 --- a/examples/scripts/spm_XNES.py +++ b/examples/scripts/spm_XNES.py @@ -43,7 +43,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/spm_descent.py b/examples/scripts/spm_descent.py index 448d907c..94573f0c 100644 --- a/examples/scripts/spm_descent.py +++ b/examples/scripts/spm_descent.py @@ -48,7 +48,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) @@ -57,5 +57,5 @@ pybop.plot_parameters(optim) # Plot the cost landscape with optimisation path -bounds = np.array([[0.5, 0.8], [0.4, 0.7]]) +bounds = np.asarray([[0.5, 0.8], [0.4, 0.7]]) pybop.plot2d(optim, bounds=bounds, steps=15) diff --git a/examples/scripts/spm_pso.py b/examples/scripts/spm_pso.py index a69ea3eb..efc97ad2 100644 --- a/examples/scripts/spm_pso.py +++ b/examples/scripts/spm_pso.py @@ -43,7 +43,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, 
title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/spm_scipymin.py b/examples/scripts/spm_scipymin.py index ede7de3e..b6cec3f0 100644 --- a/examples/scripts/spm_scipymin.py +++ b/examples/scripts/spm_scipymin.py @@ -45,7 +45,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot convergence pybop.plot_convergence(optim) diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py index c103398d..64ecd2e1 100644 --- a/examples/scripts/spme_max_energy.py +++ b/examples/scripts/spme_max_energy.py @@ -60,7 +60,7 @@ # Plot the timeseries output if cost.update_capacity: problem._model.approximate_capacity(x) -pybop.quick_plot(problem, inputs=x, title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot the cost landscape with optimisation path if len(x) == 2: diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index e4d51501..9406572b 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -1,7 +1,7 @@ import numpy as np from pybop.costs.base_cost import BaseCost -from pybop.models.base_model import Inputs +from pybop.parameters.parameter import Inputs class BaseLikelihood(BaseCost): @@ -48,7 +48,7 @@ def set_sigma(self, sigma): ) if not isinstance(sigma, np.ndarray): - sigma = np.array(sigma) + sigma = np.asarray(sigma) if not np.issubdtype(sigma.dtype, np.number): raise ValueError("Sigma must contain only numeric values") @@ -75,7 +75,7 @@ def _evaluate(self, inputs: Inputs, grad=None): if len(y.get(key, [])) != len(self._target.get(key, [])): return -np.float64(np.inf) # prediction doesn't match target - e = np.array( + e = np.asarray( [ np.sum( self._offset @@ -103,7 +103,7 @@ def _evaluateS1(self, inputs: Inputs, grad=None): dl = self._dl * np.ones(self.n_parameters) return -likelihood, -dl - r = np.array([self._target[signal] - y[signal] for signal in self.signal]) + r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) likelihood = self._evaluate(inputs) dl = np.sum((self.sigma2 * np.sum((r * dy.T), axis=2)), axis=1) return likelihood, dl @@ -139,7 +139,7 @@ def _evaluate(self, inputs: Inputs, grad=None): Returns: float: The log-likelihood value, or -inf if the standard deviations are received as non-positive. 
""" - sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND + sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND (replace in #338) if np.any(sigma <= 0): return -np.inf @@ -150,7 +150,7 @@ def _evaluate(self, inputs: Inputs, grad=None): if len(y.get(key, [])) != len(self._target.get(key, [])): return -np.float64(np.inf) # prediction doesn't match target - e = np.array( + e = np.asarray( [ np.sum( self._logpi @@ -171,7 +171,7 @@ def _evaluateS1(self, inputs: Inputs, grad=None): Calls the problem.evaluateS1 method and calculates the log-likelihood """ - sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND + sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND (replace in #338) if np.any(sigma <= 0): return -np.float64(np.inf), -self._dl * np.ones(self.n_parameters) @@ -183,7 +183,7 @@ def _evaluateS1(self, inputs: Inputs, grad=None): dl = self._dl * np.ones(self.n_parameters) return -likelihood, -dl - r = np.array([self._target[signal] - y[signal] for signal in self.signal]) + r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) likelihood = self._evaluate(inputs) dl = sigma ** (-2.0) * np.sum((r * dy.T), axis=2) dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=1) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index a9a11b9c..659e3f7f 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,6 +1,5 @@ from pybop import BaseProblem -from pybop.models.base_model import Inputs -from pybop.parameters.parameter import Parameters +from pybop.parameters.parameter import Inputs, Parameters class BaseCost: diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 76dbd5f6..85f3dee4 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -3,7 +3,7 @@ import numpy as np from pybop.costs.base_cost import BaseCost -from pybop.models.base_model import Inputs +from pybop.parameters.parameter import Inputs class DesignCost(BaseCost): diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 7993a0b4..3cb57ec9 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -2,8 +2,8 @@ from pybop.costs._likelihoods import BaseLikelihood from pybop.costs.base_cost import BaseCost -from pybop.models.base_model import Inputs from pybop.observers.observer import Observer +from pybop.parameters.parameter import Inputs class RootMeanSquaredError(BaseCost): @@ -48,7 +48,7 @@ def _evaluate(self, inputs: Inputs, grad=None): if len(prediction.get(key, [])) != len(self._target.get(key, [])): return np.float64(np.inf) # prediction doesn't match target - e = np.array( + e = np.asarray( [ np.sqrt(np.mean((prediction[signal] - self._target[signal]) ** 2)) for signal in self.signal @@ -88,7 +88,7 @@ def _evaluateS1(self, inputs: Inputs): de = self._de * np.ones(self.n_parameters) return e, de - r = np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) e = np.sqrt(np.mean(r**2, axis=1)) de = np.mean((r * dy.T), axis=2) / (e + np.finfo(float).eps) @@ -160,7 +160,7 @@ def _evaluate(self, inputs: Inputs, grad=None): if len(prediction.get(key, [])) != len(self._target.get(key, [])): return np.float64(np.inf) # prediction doesn't match target - e = np.array( + e = np.asarray( [ np.sum(((prediction[signal] - self._target[signal]) ** 2)) for signal in self.signal @@ -198,7 +198,7 @@ def _evaluateS1(self, inputs: Inputs): de = self._de * np.ones(self.n_parameters) return e, de - r = 
np.array([y[signal] - self._target[signal] for signal in self.signal]) + r = np.asarray([y[signal] - self._target[signal] for signal in self.signal]) e = np.sum(np.sum(r**2, axis=0), axis=0) de = 2 * np.sum(np.sum((r * dy.T), axis=2), axis=1) @@ -331,7 +331,7 @@ def _evaluate(self, inputs: Inputs, grad=None): """ log_likelihood = self.likelihood._evaluate(inputs) log_prior = sum( - self.parameters[key].prior.logpdf(inputs[key]) for key in inputs.keys() + self.parameters[key].prior.logpdf(value) for key, value in inputs.items() ) posterior = log_likelihood + log_prior diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py index 1c740119..a016bbc6 100644 --- a/pybop/models/base_model.py +++ b/pybop/models/base_model.py @@ -7,8 +7,7 @@ import pybamm from pybop import Dataset, Experiment, Parameters, ParameterSet - -Inputs = Dict[str, float] +from pybop.parameters.parameter import Inputs @dataclass @@ -104,9 +103,7 @@ def build( The initial state of charge to be used in simulations. """ self.dataset = dataset - if parameters is None: - self.parameters = Parameters() - else: + if parameters is not None: self.parameters = parameters self.classify_and_update_parameters(self.parameters) @@ -293,7 +290,7 @@ def reinit( if x is None: x = self._built_model.y0 - sol = pybamm.Solution([np.array([t])], [x], self._built_model, inputs) + sol = pybamm.Solution([np.asarray([t])], [x], self._built_model, inputs) return TimeSeriesState(sol=sol, inputs=inputs, t=t) @@ -304,7 +301,7 @@ def get_state(self, inputs: Inputs, t: float, x: np.ndarray) -> TimeSeriesState: if self._built_model is None: raise ValueError("Model must be built before calling get_state") - sol = pybamm.Solution([np.array([t])], [x], self._built_model, inputs) + sol = pybamm.Solution([np.asarray([t])], [x], self._built_model, inputs) return TimeSeriesState(sol=sol, inputs=inputs, t=t) @@ -466,7 +463,7 @@ def predict( Parameters ---------- - inputs : Inputse, optional + inputs : Inputs, optional Input parameters for the simulation. Defaults to None, indicating that the default parameters should be used. 
t_eval : array-like, optional diff --git a/pybop/models/empirical/ecm.py b/pybop/models/empirical/ecm.py index 784fccb0..d2d97d6d 100644 --- a/pybop/models/empirical/ecm.py +++ b/pybop/models/empirical/ecm.py @@ -1,7 +1,7 @@ from pybamm import equivalent_circuit as pybamm_equivalent_circuit -from pybop.models.base_model import Inputs from pybop.models.empirical.base_ecm import ECircuitModel +from pybop.parameters.parameter import Inputs class Thevenin(ECircuitModel): diff --git a/pybop/observers/observer.py b/pybop/observers/observer.py index 0c374f10..1c35c25d 100644 --- a/pybop/observers/observer.py +++ b/pybop/observers/observer.py @@ -135,7 +135,7 @@ def get_current_covariance(self) -> Covariance: def get_measure(self, x: TimeSeriesState) -> np.ndarray: measures = [x.sol[s].data[-1] for s in self._signal] - return np.array([[m] for m in measures]) + return np.asarray([[m] for m in measures]) def get_current_time(self) -> float: """ diff --git a/pybop/observers/unscented_kalman.py b/pybop/observers/unscented_kalman.py index 60fe0d53..afbc2a01 100644 --- a/pybop/observers/unscented_kalman.py +++ b/pybop/observers/unscented_kalman.py @@ -118,7 +118,7 @@ def observe(self, time: float, value: np.ndarray) -> float: if value is None: raise ValueError("Measurement must be provided.") elif isinstance(value, np.floating): - value = np.array([value]) + value = np.asarray([value]) dt = time - self.get_current_time() if dt < 0: @@ -201,7 +201,7 @@ def __init__( zero_cols = np.logical_and(np.all(P0 == 0, axis=1), np.all(Rp == 0, axis=1)) zeros = np.logical_and(zero_rows, zero_cols) ones = np.logical_not(zeros) - states = np.array(range(len(x0)))[ones] + states = np.asarray(range(len(x0)))[ones] bool_mask = np.ix_(ones, ones) S_filtered = linalg.cholesky(P0[ones, :][:, ones]) @@ -276,11 +276,11 @@ def gen_sigma_points( # Define the weights of the sigma points w_m0 = sigma / (L + sigma) - w_m = np.array([w_m0] + [1 / (2 * (L + sigma))] * (2 * L)) + w_m = np.asarray([w_m0] + [1 / (2 * (L + sigma))] * (2 * L)) # Define the weights of the covariance of the sigma points w_c0 = w_m0 + (1 - alpha**2 + beta) - w_c = np.array([w_c0] + [1 / (2 * (L + sigma))] * (2 * L)) + w_c = np.asarray([w_c0] + [1 / (2 * (L + sigma))] * (2 * L)) return (points, w_m, w_c) diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index caae83d6..ba433063 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -40,8 +40,8 @@ class BaseOptimiser: If True, the feasibility of the optimised parameters is checked (default: True). allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). - log : list - A log of the parameter values tried during the optimisation. + log : dict + A log of the parameter values tried during the optimisation and associated costs. """ def __init__( @@ -55,7 +55,7 @@ self.bounds = None self.sigma0 = 0.1 self.verbose = False - self.log = [] + self.log = dict(x=[], x_best=[], cost=[]) self.minimising = True self.physical_viability = False self.allow_infeasible_solutions = False @@ -112,7 +112,7 @@ def set_base_options(self): """ Update the base optimiser options and remove them from the options dictionary. """ - # Set initial values + # Set initial values; if x0 is None, the initial values are unmodified.
self.parameters.update(initial_values=self.unset_options.pop("x0", None)) self.x0 = self.parameters.initial_value() diff --git a/pybop/optimisers/base_pints_optimiser.py b/pybop/optimisers/base_pints_optimiser.py index e0e78ba8..a5140df0 100644 --- a/pybop/optimisers/base_pints_optimiser.py +++ b/pybop/optimisers/base_pints_optimiser.py @@ -258,7 +258,9 @@ def f(x, grad=None): # Update counts evaluations += len(fs) iteration += 1 - self.log.append(xs) + self.log["x"].append(xs) + self.log["x_best"].append(self.pints_optimiser.x_best()) + self.log["cost"].append(fb if self.minimising else -fb) # Check stopping criteria: # Maximum number of iterations diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py index d209548b..544abfc8 100644 --- a/pybop/optimisers/scipy_optimisers.py +++ b/pybop/optimisers/scipy_optimisers.py @@ -1,5 +1,5 @@ import numpy as np -from scipy.optimize import differential_evolution, minimize +from scipy.optimize import OptimizeResult, differential_evolution, minimize from pybop import BaseOptimiser, Result @@ -150,11 +150,13 @@ def _run_optimiser(self): result : scipy.optimize.OptimizeResult The result of the optimisation including the optimised parameter values and cost. """ - self.log = [[self.x0]] # Add callback storing history of parameter values - def callback(x): - self.log.append([x]) + def callback(intermediate_result: OptimizeResult): + self.log["x_best"].append(intermediate_result.x) + self.log["cost"].append( + intermediate_result.fun if self.minimising else -intermediate_result.fun + ) # Compute the absolute initial cost and resample if required self._cost0 = np.abs(self.cost(self.x0)) @@ -175,6 +177,7 @@ if not self._options["jac"]: def cost_wrapper(x): + self.log["x"].append([x]) cost = self.cost(x) / self._cost0 if np.isinf(cost): self.inf_count += 1 @@ -183,6 +186,7 @@ elif self._options["jac"] is True: def cost_wrapper(x): + self.log["x"].append([x]) L, dl = self.cost.evaluateS1(x) return (L, dl) if self.minimising else (-L, -dl) @@ -297,10 +301,14 @@ def _run_optimiser(self): self.x0 = None # Add callback storing history of parameter values - def callback(x, convergence): - self.log.append([x]) + def callback(intermediate_result: OptimizeResult): + self.log["x_best"].append(intermediate_result.x) + self.log["cost"].append( + intermediate_result.fun if self.minimising else -intermediate_result.fun + ) def cost_wrapper(x): + self.log["x"].append([x]) return self.cost(x) if self.minimising else -self.cost(x) return differential_evolution( diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index a912f302..e1a828af 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -1,10 +1,12 @@ from collections import OrderedDict -from typing import Dict, List +from typing import Dict, List, Union import numpy as np from pybop._utils import is_numeric +Inputs = Dict[str, float] + class Parameter: """ @@ -169,7 +171,7 @@ def __init__(self, *args): for param in args: self.add(param) - def __getitem__(self, key: str): + def __getitem__(self, key: str) -> Parameter: """ Return the parameter dictionary corresponding to a particular key.
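To summarise the logging changes above: a minimal sketch, not part of the diff, of the restructured `log` attribute and what the PINTS and SciPy wrappers append each iteration (the SciPy callbacks now take the new-style `intermediate_result: OptimizeResult` argument that recent SciPy versions pass). The numeric values are invented for illustration.

# Sketch of the new log structure: a dict instead of a flat list.
log = dict(x=[], x_best=[], cost=[])

# Schematically, one optimiser iteration appends:
log["x"].append([[0.60, 0.60], [0.58, 0.62]])  # all candidates evaluated
log["x_best"].append([0.58, 0.62])             # best point so far
log["cost"].append(0.0123)                     # best cost so far

# plot_convergence and plot_parameters consume these keys directly:
iterations = list(range(1, len(log["cost"]) + 1))
assert len(log["x_best"]) == len(log["cost"])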
@@ -426,7 +428,7 @@ def as_dict(self, values=None) -> Dict: values = self.true_value() return {key: values[i] for i, key in enumerate(self.param.keys())} - def verify(self, inputs=None): + def verify(self, inputs: Union[Inputs, None] = None): """ Verify that the inputs are an Inputs dictionary or numeric values which can be used to construct an Inputs dictionary diff --git a/pybop/plotting/plot2d.py b/pybop/plotting/plot2d.py index 0ee95dc7..961bc7c4 100644 --- a/pybop/plotting/plot2d.py +++ b/pybop/plotting/plot2d.py @@ -1,12 +1,19 @@ import sys import numpy as np +from scipy.interpolate import griddata from pybop import BaseOptimiser, Optimisation, PlotlyManager def plot2d( - cost_or_optim, gradient=False, bounds=None, steps=10, show=True, **layout_kwargs + cost_or_optim, + gradient: bool = False, + bounds: np.ndarray = None, + steps: int = 10, + show: bool = True, + use_optim_log: bool = False, + **layout_kwargs, ): """ Plot a 2D visualisation of a cost landscape using Plotly. @@ -26,9 +33,11 @@ def plot2d( A 2x2 array specifying the [min, max] bounds for each parameter. If None, uses `cost.parameters.get_bounds_for_plotly`. steps : int, optional - The number of intervals to divide the parameter space into along each dimension (default is 10). + The number of grid points to divide the parameter space into along each dimension (default: 10). show : bool, optional If True, the figure is shown upon creation (default: True). + use_optim_log : bool, optional + If True, the optimisation log is used to shape the cost landscape (default: False). **layout_kwargs : optional Valid Plotly layout keys and their values, e.g. `xaxis_title="Time [s]"` or @@ -68,25 +77,43 @@ def plot2d( # Populate cost matrix for i, xi in enumerate(x): for j, yj in enumerate(y): - costs[j, i] = cost(np.array([xi, yj])) + costs[j, i] = cost(np.asarray([xi, yj])) if gradient: grad_parameter_costs = [] # Determine the number of gradient outputs from cost.evaluateS1 - num_gradients = len(cost.evaluateS1(np.array([x[0], y[0]]))[1]) + num_gradients = len(cost.evaluateS1(np.asarray([x[0], y[0]]))[1]) # Create an array to hold each gradient output & populate grads = [np.zeros((len(y), len(x))) for _ in range(num_gradients)] for i, xi in enumerate(x): for j, yj in enumerate(y): - (*current_grads,) = cost.evaluateS1(np.array([xi, yj]))[1] + (*current_grads,) = cost.evaluateS1(np.asarray([xi, yj]))[1] for k, grad_output in enumerate(current_grads): grads[k][j, i] = grad_output # Append the arrays to the grad_parameter_costs list grad_parameter_costs.extend(grads) + elif plot_optim and use_optim_log: + # Flatten the cost matrix and parameter values + flat_x = np.tile(x, len(y)) + flat_y = np.repeat(y, len(x)) + flat_costs = costs.flatten() + + # Append the optimisation trace to the data + parameter_log = np.asarray(optim.log["x_best"]) + flat_x = np.concatenate((flat_x, parameter_log[:, 0])) + flat_y = np.concatenate((flat_y, parameter_log[:, 1])) + flat_costs = np.concatenate((flat_costs, optim.log["cost"])) + + # Order the parameter values and estimate the cost using interpolation + x = np.unique(flat_x) + y = np.unique(flat_y) + xf, yf = np.meshgrid(x, y) + costs = griddata((flat_x, flat_y), flat_costs, (xf, yf), method="linear") + # Import plotly only when needed go = PlotlyManager().go @@ -107,11 +134,15 @@ def plot2d( layout = go.Layout(layout_options) # Create contour plot and update the layout - fig = go.Figure(data=[go.Contour(x=x, y=y, z=costs)], layout=layout) + fig = go.Figure( + data=[go.Contour(x=x, y=y, z=costs, 
connectgaps=True)], layout=layout + ) if plot_optim: # Plot the optimisation trace - optim_trace = np.array([item for sublist in optim.log for item in sublist]) + optim_trace = np.asarray( + [item for sublist in optim.log["x"] for item in sublist] + ) optim_trace = optim_trace.reshape(-1, 2) fig.add_trace( go.Scatter( diff --git a/pybop/plotting/plot_convergence.py b/pybop/plotting/plot_convergence.py index 5cf5bb8a..f8ec6948 100644 --- a/pybop/plotting/plot_convergence.py +++ b/pybop/plotting/plot_convergence.py @@ -1,7 +1,5 @@ import sys -import numpy as np - from pybop import StandardPlot @@ -26,25 +24,16 @@ def plot_convergence(optim, show=True, **layout_kwargs): The Plotly figure object for the convergence plot. """ - # Extract the cost function and log from the optimisation object - cost = optim.cost - log = optim.log - - # Find the best cost from each iteration - best_cost_per_iteration = [ - min((cost(solution) for solution in log_entry), default=np.inf) - if optim.minimising - else max((cost(solution) for solution in log_entry), default=-np.inf) - for log_entry in log - ] + # Extract log from the optimisation object + cost_log = optim.log["cost"] # Generate a list of iteration numbers - iteration_numbers = list(range(1, len(best_cost_per_iteration) + 1)) + iteration_numbers = list(range(1, len(cost_log) + 1)) # Create a plotting dictionary plot_dict = StandardPlot( x=iteration_numbers, - y=best_cost_per_iteration, + y=cost_log, layout_options=dict( xaxis_title="Iteration", yaxis_title="Cost", title="Convergence" ), diff --git a/pybop/plotting/plot_parameters.py b/pybop/plotting/plot_parameters.py index 149c0d16..bc1f9a7a 100644 --- a/pybop/plotting/plot_parameters.py +++ b/pybop/plotting/plot_parameters.py @@ -26,10 +26,10 @@ def plot_parameters(optim, show=True, **layout_kwargs): # Extract parameters and log from the optimisation object parameters = optim.cost.parameters - log = optim.log + log = optim.log["x"] # Create a list of sequential integers for the x-axis - x = list(range(len(log[0]) * len(log))) + x = list(range(1, len(log[0]) * len(log) + 1)) # Determine the number of elements in the smallest arrays num_elements = len(log[0][0]) diff --git a/pybop/plotting/plot_problem.py b/pybop/plotting/plot_problem.py index 65812d15..fb8759c9 100644 --- a/pybop/plotting/plot_problem.py +++ b/pybop/plotting/plot_problem.py @@ -3,10 +3,10 @@ import numpy as np from pybop import DesignProblem, FittingProblem, StandardPlot -from pybop.models.base_model import Inputs +from pybop.parameters.parameter import Inputs -def quick_plot(problem, inputs: Inputs = None, show=True, **layout_kwargs): +def quick_plot(problem, problem_inputs: Inputs = None, show=True, **layout_kwargs): """ Quickly plot the target dataset against optimised model output. @@ -17,7 +17,7 @@ def quick_plot(problem, inputs: Inputs = None, show=True, **layout_kwargs): ---------- problem : object Problem object with dataset and signal attributes. - inputs : Inputs + problem_inputs : Inputs Optimised (or example) parameter values. show : bool, optional If True, the figure is shown upon creation (default: True). @@ -31,14 +31,14 @@ def quick_plot(problem, inputs: Inputs = None, show=True, **layout_kwargs): plotly.graph_objs.Figure The Plotly figure object for the scatter plot. 
""" - if inputs is None: - inputs = problem.parameters.as_dict() + if problem_inputs is None: + problem_inputs = problem.parameters.as_dict() else: - inputs = problem.parameters.verify(inputs) + problem_inputs = problem.parameters.verify(problem_inputs) # Extract the time data and evaluate the model's output and target values xaxis_data = problem.time_data() - model_output = problem.evaluate(inputs) + model_output = problem.evaluate(problem_inputs) target_output = problem.get_target() # Create a plot for each output diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index 9f1853cc..b64aef14 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -1,5 +1,5 @@ from pybop import BaseModel, Dataset, Parameter, Parameters -from pybop.models.base_model import Inputs +from pybop.parameters.parameter import Inputs class BaseProblem: @@ -78,7 +78,7 @@ def evaluate(self, inputs: Inputs): Parameters ---------- inputs : Inputs - Parameters for evaluation of the mmodel. + Parameters for evaluation of the model. Raises ------ @@ -95,7 +95,7 @@ def evaluateS1(self, inputs: Inputs): Parameters ---------- inputs : Inputs - Parameters for evaluation of the mmodel. + Parameters for evaluation of the model. Raises ------ diff --git a/pybop/problems/design_problem.py b/pybop/problems/design_problem.py index d5b5f4e9..b99a9357 100644 --- a/pybop/problems/design_problem.py +++ b/pybop/problems/design_problem.py @@ -1,7 +1,7 @@ import numpy as np from pybop import BaseProblem -from pybop.models.base_model import Inputs +from pybop.parameters.parameter import Inputs class DesignProblem(BaseProblem): diff --git a/pybop/problems/fitting_problem.py b/pybop/problems/fitting_problem.py index 4ebc46ee..5bd5b44a 100644 --- a/pybop/problems/fitting_problem.py +++ b/pybop/problems/fitting_problem.py @@ -2,8 +2,7 @@ from pybop import BaseProblem from pybop._dataset import Dataset -from pybop.models.base_model import Inputs -from pybop.parameters.parameter import Parameters +from pybop.parameters.parameter import Inputs, Parameters class FittingProblem(BaseProblem): @@ -94,13 +93,13 @@ def evaluate(self, inputs: Inputs): inputs = self.parameters.verify(inputs) requires_rebuild = False - for key in inputs.keys(): - if ( - key in self._model.rebuild_parameters - and inputs[key] != self.parameters[key].value - ): - self.parameters[key].update(value=inputs[key]) - requires_rebuild = True + for key, value in inputs.items(): + if key in self._model.rebuild_parameters: + current_value = self.parameters[key].value + if value != current_value: + self.parameters[key].update(value=value) + requires_rebuild = True + if requires_rebuild: self._model.rebuild(parameters=self.parameters) diff --git a/pyproject.toml b/pyproject.toml index 14ec41be..6d2e1b61 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "pybop" -version = "24.3" +version = "24.3.1" authors = [ {name = "The PyBOP Team"}, ] @@ -27,7 +27,7 @@ classifiers = [ requires-python = ">=3.9, <3.13" dependencies = [ "pybamm>=23.9", - "numpy>=1.16", + "numpy>=1.16, <2.0", "scipy>=1.3", "pints>=0.5", "bpx>=0.4", diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py index 5b9ef4e2..a196ac67 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -13,7 +13,7 @@ class TestOptimisation: @pytest.fixture(autouse=True) def setup(self): - self.ground_truth 
= np.array([0.55, 0.55]) + np.random.normal( + self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( loc=0.0, scale=0.05, size=2 ) @@ -117,7 +117,5 @@ def get_data(self, model, parameters, x, init_soc): ] * 2 ) - sim = model.predict( - init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict(x) - ) + sim = model.predict(init_soc=init_soc, experiment=experiment, inputs=x) return sim diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 491fd170..20fdee0e 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -11,7 +11,7 @@ class Test_SPM_Parameterisation: @pytest.fixture(autouse=True) def setup(self): - self.ground_truth = np.array([0.55, 0.55]) + np.random.normal( + self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( loc=0.0, scale=0.05, size=2 ) @@ -160,7 +160,7 @@ def spm_two_signal_cost(self, parameters, model, cost_class): [ pybop.SciPyDifferentialEvolution, pybop.IRPropMin, - pybop.CMAES, + pybop.XNES, ], ) @pytest.mark.integration @@ -218,7 +218,7 @@ def test_model_misparameterisation(self, parameters, model, init_soc): cost = pybop.RootMeanSquaredError(problem) # Select optimiser - optimiser = pybop.CMAES + optimiser = pybop.XNES # Build the optimisation problem optim = optimiser(cost=cost) @@ -245,7 +245,5 @@ def get_data(self, model, parameters, x, init_soc): ] * 2 ) - sim = model.predict( - init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict(x) - ) + sim = model.predict(init_soc=init_soc, experiment=experiment, inputs=x) return sim diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 6febd29d..45df6ba4 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -11,7 +11,7 @@ class TestTheveninParameterisation: @pytest.fixture(autouse=True) def setup(self): - self.ground_truth = np.array([0.05, 0.05]) + np.random.normal( + self.ground_truth = np.asarray([0.05, 0.05]) + np.random.normal( loc=0.0, scale=0.01, size=2 ) @@ -102,5 +102,5 @@ def get_data(self, model, parameters, x): ), ] ) - sim = model.predict(experiment=experiment, inputs=parameters.as_dict(x)) + sim = model.predict(experiment=experiment, inputs=x) return sim diff --git a/tests/plotting/test_plotly_manager.py b/tests/plotting/test_plotly_manager.py index fc562ff2..ba0adbd8 100644 --- a/tests/plotting/test_plotly_manager.py +++ b/tests/plotting/test_plotly_manager.py @@ -1,6 +1,6 @@ import subprocess -from distutils.spawn import find_executable from importlib.metadata import distributions +from shutil import which import numpy as np import plotly @@ -10,7 +10,7 @@ from pybop import PlotlyManager # Find the Python executable -python_executable = find_executable("python") +python_executable = which("python") @pytest.fixture(scope="session") @@ -95,7 +95,7 @@ def test_cancel_installation(mocker, uninstall_plotly_if_installed): with pytest.raises(SystemExit) as pytest_wrapped_e: PlotlyManager().prompt_for_plotly_installation() - assert pytest_wrapped_e.type == SystemExit + assert pytest_wrapped_e.type is SystemExit assert pytest_wrapped_e.value.code == 1 assert not is_package_installed("plotly") diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index 310d149b..b99aa5d0 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -131,7 +131,7 @@ def 
test_gaussian_log_likelihood(self, one_signal_problem): grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5])) assert isinstance(result, float) np.testing.assert_allclose(result, grad_result, atol=1e-5) - assert grad_likelihood[0] <= 0 # TEMPORARY WORKAROUND + assert grad_likelihood[0] <= 0 # TEMPORARY WORKAROUND (Remove in #338) @pytest.mark.unit def test_gaussian_log_likelihood_returns_negative_inf(self, one_signal_problem): diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py index d8fdf4fa..6809aec8 100644 --- a/tests/unit/test_models.py +++ b/tests/unit/test_models.py @@ -357,8 +357,8 @@ def test_non_converged_solution(self): ) problem = pybop.FittingProblem(model, parameters=parameters, dataset=dataset) - res = problem.evaluate(parameters.as_dict([-0.2, -0.2])) - _, res_grad = problem.evaluateS1(parameters.as_dict([-0.2, -0.2])) + res = problem.evaluate([-0.2, -0.2]) + _, res_grad = problem.evaluateS1([-0.2, -0.2]) for key in problem.signal: assert np.isinf(res.get(key, [])).any() diff --git a/tests/unit/test_observers.py b/tests/unit/test_observers.py index 197db2fb..2d2e3bc6 100644 --- a/tests/unit/test_observers.py +++ b/tests/unit/test_observers.py @@ -73,6 +73,7 @@ def test_observer(self, model, parameters): # Test evaluate with different inputs observer._time_data = t_eval observer.evaluate(parameters.as_dict()) + observer.evaluate(parameters.current_value()) # Test evaluate with dataset observer._dataset = pybop.Dataset( diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 68ba33c3..02b3ea5c 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -125,6 +125,12 @@ def test_parameters_construction(self, parameter): ): params.add(parameter) + with pytest.raises( + Exception, + match="Parameter requires a name.", + ): + params.add(dict(value=2)) + params.remove(parameter_name=parameter.name) # Test parameter addition via dict diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py index 8c05810a..57f0e4ee 100644 --- a/tests/unit/test_plots.py +++ b/tests/unit/test_plots.py @@ -89,7 +89,7 @@ def test_problem_plots(self, fitting_problem, design_problem): pybop.quick_plot(design_problem) # Test conversion of values into inputs - pybop.quick_plot(fitting_problem, inputs=[0.6, 0.6]) + pybop.quick_plot(fitting_problem, problem_inputs=[0.6, 0.6]) @pytest.fixture def cost(self, fitting_problem): @@ -128,6 +128,12 @@ def test_optim_plots(self, optim): # Plot the cost landscape with optimisation path pybop.plot2d(optim, steps=5) + # Plot the cost landscape using optimisation path + pybop.plot2d(optim, steps=5, use_optim_log=True) + + # Plot gradient cost landscape + pybop.plot2d(optim, gradient=True, steps=5) + @pytest.mark.unit def test_with_ipykernel(self, dataset, cost, optim): import ipykernel diff --git a/tests/unit/test_problem.py b/tests/unit/test_problem.py index a7f1dd0c..c2c40a03 100644 --- a/tests/unit/test_problem.py +++ b/tests/unit/test_problem.py @@ -99,6 +99,13 @@ def test_base_problem(self, parameters, model, dataset): match="The input parameters must be a pybop Parameter, a list of pybop.Parameter objects, or a pybop Parameters object.", ): problem = pybop.BaseProblem(parameters="Invalid string") + with pytest.raises( + TypeError, + match="All elements in the list must be pybop.Parameter objects.", + ): + problem = pybop.BaseProblem( + parameters=[parameter_list[0], "Invalid string"] + ) @pytest.mark.unit def test_fitting_problem(self, parameters, dataset, model, signal): @@ 
-166,8 +173,8 @@ def test_design_problem(self, parameters, experiment, model): ) # building postponed with input experiment # Test model.predict - model.predict(inputs=parameters.as_dict([1e-5, 1e-5]), experiment=experiment) - model.predict(inputs=parameters.as_dict([3e-5, 3e-5]), experiment=experiment) + model.predict(inputs=[1e-5, 1e-5], experiment=experiment) + model.predict(inputs=[3e-5, 3e-5], experiment=experiment) @pytest.mark.unit def test_problem_construct_with_model_predict( @@ -175,16 +182,14 @@ def test_problem_construct_with_model_predict( ): # Construct model and predict model.parameters = parameters - out = model.predict( - inputs=parameters.as_dict([1e-5, 1e-5]), t_eval=np.linspace(0, 10, 100) - ) + out = model.predict(inputs=[1e-5, 1e-5], t_eval=np.linspace(0, 10, 100)) problem = pybop.FittingProblem( model, parameters, dataset=dataset, signal=signal ) # Test problem evaluate - problem_output = problem.evaluate(parameters.as_dict([2e-5, 2e-5])) + problem_output = problem.evaluate([2e-5, 2e-5]) assert problem._model._built_model is not None with pytest.raises(AssertionError): diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py index edefd0ad..2d5727b6 100644 --- a/tests/unit/test_standalone.py +++ b/tests/unit/test_standalone.py @@ -18,14 +18,14 @@ def test_standalone_optimiser(self): assert optim.name() == "StandaloneOptimiser" x, final_cost = optim.run() - assert optim.cost(optim.parameters.initial_value()) > final_cost + assert optim.cost(optim.x0) > final_cost np.testing.assert_allclose(x, [2, 4], atol=1e-2) # Test with bounds optim = StandaloneOptimiser(bounds=dict(upper=[5, 6], lower=[1, 2])) x, final_cost = optim.run() - assert optim.cost(optim.parameters.initial_value()) > final_cost + assert optim.cost(optim.x0) > final_cost np.testing.assert_allclose(x, [2, 4], atol=1e-2) @pytest.mark.unit @@ -35,7 +35,8 @@ def test_optimisation_on_standalone_cost(self): optim = pybop.SciPyDifferentialEvolution(cost=cost) x, final_cost = optim.run() - initial_cost = optim.cost(optim.parameters.initial_value()) + optim.x0 = optim.log["x"][0][0] + initial_cost = optim.cost(optim.x0) assert initial_cost > final_cost np.testing.assert_allclose(final_cost, 42, atol=1e-1)
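As a closing illustration of the `use_optim_log` branch added to `plot2d`: a self-contained sketch, with invented values, of how `scipy.interpolate.griddata` blends the logged best points into the gridded cost landscape.

import numpy as np
from scipy.interpolate import griddata

# A coarse 2x2 cost grid plus one logged best point off the grid
flat_x = np.asarray([0.5, 0.8, 0.5, 0.8, 0.65])
flat_y = np.asarray([0.4, 0.4, 0.7, 0.7, 0.55])
flat_costs = np.asarray([1.0, 2.0, 3.0, 4.0, 0.1])

# Rebuild a rectangular grid over the unique coordinates and interpolate
x = np.unique(flat_x)
y = np.unique(flat_y)
xf, yf = np.meshgrid(x, y)
costs = griddata((flat_x, flat_y), flat_costs, (xf, yf), method="linear")
print(costs)  # a 3x3 surface with the logged point blended in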