diff --git a/.github/workflows/periodic_benchmarks.yaml b/.github/workflows/periodic_benchmarks.yaml
index 6ea0b489..63771150 100644
--- a/.github/workflows/periodic_benchmarks.yaml
+++ b/.github/workflows/periodic_benchmarks.yaml
@@ -10,9 +10,9 @@
 # - Publish website
 name: Benchmarks
 on:
-  # Everyday at 12 pm UTC
+  # Every Monday and Thursday at 12 pm UTC
   schedule:
-    - cron: "0 12 * * *"
+    - cron: "0 12 * * 1,4"
   # Make it possible to trigger the
   # workflow manually
   workflow_dispatch:
diff --git a/.github/workflows/scheduled_tests.yaml b/.github/workflows/scheduled_tests.yaml
index c91599a0..ade88188 100644
--- a/.github/workflows/scheduled_tests.yaml
+++ b/.github/workflows/scheduled_tests.yaml
@@ -6,10 +6,9 @@ on:
     branches:
       - main

-  # runs every day at 09:00 and 15:00 UTC
+  # runs every day at 09:00 UTC
   schedule:
     - cron: '0 9 * * *'
-    - cron: '0 15 * * *'

 # Check noxfile.py for associated environment variables
 env:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2818fed3..1c47f7b3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -4,7 +4,7 @@ ci:

 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.4.5"
+    rev: "v0.4.6"
     hooks:
       - id: ruff
         args: [--fix, --show-fixes]
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c0b75954..5facc001 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@

 ## Bug Fixes

+- [#337](https://github.com/pybop-team/PyBOP/issues/337) - Restores benchmarks, relaxes CI schedule for benchmarks and scheduled tests.
 - [#231](https://github.com/pybop-team/PyBOP/issues/231) - Allows passing of keyword arguments to PyBaMM models and disables build on initialisation.
 - [#321](https://github.com/pybop-team/PyBOP/pull/321) - Improves `integration/test_spm_parameterisation.py` stability, adds flakly pytest plugin, and `test_thevenin_parameterisation.py` integration test.
 - [#330](https://github.com/pybop-team/PyBOP/issues/330) - Fixes implementation of default plotting options.
diff --git a/benchmarks/benchmark_parameterisation.py b/benchmarks/benchmark_parameterisation.py
index 76976887..8440ba5e 100644
--- a/benchmarks/benchmark_parameterisation.py
+++ b/benchmarks/benchmark_parameterisation.py
@@ -82,12 +82,26 @@ def setup(self, model, parameter_set, optimiser):
         # Create cost function
         cost = pybop.SumSquaredError(problem=problem)

-        # Create optimization instance
-        self.optim = pybop.Optimisation(cost, optimiser=optimiser)
+        # Create optimization instance and set options for consistent benchmarking
         if optimiser in [pybop.GradientDescent]:
-            self.optim.optimiser.set_learning_rate(
-                0.008
-            )  # Compromise between stability & performance
+            self.optim = pybop.Optimisation(
+                cost,
+                optimiser=optimiser,
+                max_iterations=250,
+                max_unchanged_iterations=25,
+                threshold=1e-5,
+                min_iterations=2,
+                learning_rate=0.008,  # Compromise between stability & performance
+            )
+        else:
+            self.optim = pybop.Optimisation(
+                cost,
+                optimiser=optimiser,
+                max_iterations=250,
+                max_unchanged_iterations=25,
+                threshold=1e-5,
+                min_iterations=2,
+            )

     def time_parameterisation(self, model, parameter_set, optimiser):
         """
@@ -99,10 +113,6 @@ def time_parameterisation(self, model, parameter_set, optimiser):
             parameter_set (str): The name of the parameter set being used (unused).
             optimiser (pybop.Optimiser): The optimizer class being used (unused).
""" - # Set optimizer options for consistent benchmarking - self.optim.set_max_unchanged_iterations(iterations=25, threshold=1e-5) - self.optim.set_max_iterations(250) - self.optim.set_min_iterations(2) self.optim.run() def time_optimiser_ask(self, model, parameter_set, optimiser): @@ -115,4 +125,4 @@ def time_optimiser_ask(self, model, parameter_set, optimiser): optimiser (pybop.Optimiser): The optimizer class being used. """ if optimiser not in [pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution]: - self.optim.optimiser.ask() + self.optim.pints_optimiser.ask() diff --git a/benchmarks/benchmark_track_parameterisation.py b/benchmarks/benchmark_track_parameterisation.py index 793ab462..fac2d54a 100644 --- a/benchmarks/benchmark_track_parameterisation.py +++ b/benchmarks/benchmark_track_parameterisation.py @@ -82,12 +82,26 @@ def setup(self, model, parameter_set, optimiser): # Create cost function cost = pybop.SumSquaredError(problem=problem) - # Create optimization instance - self.optim = pybop.Optimisation(cost, optimiser=optimiser) + # Create optimization instance and set options for consistent benchmarking if optimiser in [pybop.GradientDescent]: - self.optim.optimiser.set_learning_rate( - 0.008 - ) # Compromise between stability & performance + self.optim = pybop.Optimisation( + cost, + optimiser=optimiser, + max_iterations=250, + max_unchanged_iterations=25, + threshold=1e-5, + min_iterations=2, + learning_rate=0.008, # Compromise between stability & performance + ) + else: + self.optim = pybop.Optimisation( + cost, + optimiser=optimiser, + max_iterations=250, + max_unchanged_iterations=25, + threshold=1e-5, + min_iterations=2, + ) # Track output results self.x = self.results_tracking(model, parameter_set, optimiser) @@ -110,10 +124,5 @@ def results_tracking(self, model, parameter_set, optimiser): parameter_set (str): The name of the parameter set being used (unused). optimiser (pybop.Optimiser): The optimizer class being used (unused). 
""" - - # Set optimizer options for consistent benchmarking - self.optim.set_max_unchanged_iterations(iterations=25, threshold=1e-5) - self.optim.set_max_iterations(250) - self.optim.set_min_iterations(2) x, _ = self.optim.run() return x diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 713df4d4..1a600d7e 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -90,9 +90,13 @@ def __init__( self.set_base_options() self._set_up_optimiser() - # Throw an error if any options remain + # Throw an warning if any options remain if self.unset_options: - raise ValueError(f"Unrecognised keyword arguments: {self.unset_options}") + warnings.warn( + f"Unrecognised keyword arguments: {self.unset_options} will not be used.", + UserWarning, + stacklevel=2, + ) def set_base_options(self): """ diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index eada8d9b..470bfe0d 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -231,14 +231,14 @@ def test_model_misparameterisation(self, parameters, model, init_soc): optimiser = pybop.CMAES # Build the optimisation problem - parameterisation = optimiser(cost=cost) + optim = optimiser(cost=cost) + initial_cost = optim.cost(cost.x0) # Run the optimisation problem - x, final_cost = parameterisation.run() + x, final_cost = optim.run() # Assertion for final_cost - with np.testing.assert_raises(AssertionError): - np.testing.assert_allclose(final_cost, 0, atol=1e-2) + assert initial_cost > final_cost # Assertion for x with np.testing.assert_raises(AssertionError): diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 949fe7ef..2f407885 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import pytest @@ -155,11 +157,12 @@ def test_optimiser_kwargs(self, cost, optimiser): threshold=1e-2, max_evaluations=20, ) - with pytest.raises( - ValueError, - match="Unrecognised keyword arguments", + with pytest.warns( + UserWarning, + match="Unrecognised keyword arguments: {'unrecognised': 10} will not be used.", ): - optim = optimiser(cost=cost, tol=1e-3) + warnings.simplefilter("always") + optim = optimiser(cost=cost, unrecognised=10) else: # Check bounds in list format and update tol bounds = [