Update optim.log to store cost (#335)
* Update log to store cost

* Track x and x_best separately

* Include optim data in cost matrix

* Update CHANGELOG.md

* Update Parameters getitem

* Increase coverage

* Update test_optimisation.py

* Apply suggestions from code review

Co-authored-by: Brady Planden <[email protected]>

* style: pre-commit fixes

* Update plot2d with use_optim_log

---------

Co-authored-by: Brady Planden <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
3 people authored Jun 17, 2024
1 parent 4e8b0bc commit 4dfdd1d
Showing 11 changed files with 80 additions and 40 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -24,6 +24,7 @@

## Bug Fixes

+- [#165](https://github.com/pybop-team/PyBOP/issues/165) - Stores the attempted and best parameter values and the best cost for each iteration in the log attribute of the optimiser and updates the associated plots.
- [#354](https://github.com/pybop-team/PyBOP/issues/354) - Fixes the calculation of the gradient in the `RootMeanSquaredError` cost.
- [#347](https://github.com/pybop-team/PyBOP/issues/347) - Resets options between MSMR tests to cope with a bug in PyBaMM v23.9 which is fixed in PyBaMM v24.1.
- [#337](https://github.com/pybop-team/PyBOP/issues/337) - Restores benchmarks, relaxes CI schedule for benchmarks and scheduled tests.
6 changes: 3 additions & 3 deletions pybop/optimisers/base_optimiser.py
@@ -40,8 +40,8 @@ class BaseOptimiser:
If True, the feasibility of the optimised parameters is checked (default: True).
allow_infeasible_solutions : bool, optional
If True, infeasible parameter values will be allowed in the optimisation (default: True).
-log : list
-    A log of the parameter values tried during the optimisation.
+log : dict
+    A log of the parameter values tried during the optimisation and associated costs.
"""

def __init__(
@@ -54,7 +54,7 @@ def __init__(
self.bounds = None
self.sigma0 = 0.1
self.verbose = False
-self.log = []
+self.log = dict(x=[], x_best=[], cost=[])
self.minimising = True
self.physical_viability = False
self.allow_infeasible_solutions = False
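The `log` attribute changes here from a flat list of parameter batches to a dict with `x` (every point evaluated), `x_best` (the running best point), and `cost` (the best cost so far). A minimal sketch of reading the new layout; the surrounding setup is assumed typical PyBOP usage and is not part of this diff:

```python
# Sketch: consume the dict-based log after a fit. Note that "x" is
# appended per evaluation by some optimisers, so its length can differ
# from the per-iteration "x_best" and "cost" lists.
optim = pybop.Optimisation(cost)
x, final_cost = optim.run()

for i, (xb, c) in enumerate(zip(optim.log["x_best"], optim.log["cost"]), start=1):
    print(f"iteration {i}: best {xb}, cost {c:.4g}")
```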
4 changes: 3 additions & 1 deletion pybop/optimisers/base_pints_optimiser.py
@@ -258,7 +258,9 @@ def f(x, grad=None):
# Update counts
evaluations += len(fs)
iteration += 1
-self.log.append(xs)
+self.log["x"].append(xs)
+self.log["x_best"].append(self.pints_optimiser.x_best())
+self.log["cost"].append(fb if self.minimising else -fb)

# Check stopping criteria:
# Maximum number of iterations
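Here `fb` is PINTS's running best objective value. PINTS always minimises, so when PyBOP is maximising the objective is negated on the way in and negated back before logging, keeping the stored cost in the user's sign convention. A standalone sketch of that convention (the function name is illustrative):

```python
# Sketch of the sign handling above: undo the internal negation so the
# logged cost always matches the user's view of the problem.
def logged_cost(fb: float, minimising: bool) -> float:
    return fb if minimising else -fb

assert logged_cost(0.25, minimising=True) == 0.25
assert logged_cost(-0.25, minimising=False) == 0.25
```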
20 changes: 14 additions & 6 deletions pybop/optimisers/scipy_optimisers.py
@@ -1,5 +1,5 @@
import numpy as np
-from scipy.optimize import differential_evolution, minimize
+from scipy.optimize import OptimizeResult, differential_evolution, minimize

from pybop import BaseOptimiser, Result

@@ -150,11 +150,13 @@ def _run_optimiser(self):
result : scipy.optimize.OptimizeResult
The result of the optimisation including the optimised parameter values and cost.
"""
-self.log = [[self.x0]]

# Add callback storing history of parameter values
-def callback(x):
-    self.log.append([x])
+def callback(intermediate_result: OptimizeResult):
+    self.log["x_best"].append(intermediate_result.x)
+    self.log["cost"].append(
+        intermediate_result.fun if self.minimising else -intermediate_result.fun
+    )

# Compute the absolute initial cost and resample if required
self._cost0 = np.abs(self.cost(self.x0))
@@ -175,6 +177,7 @@ def callback(x):
if not self._options["jac"]:

def cost_wrapper(x):
self.log["x"].append([x])
cost = self.cost(x) / self._cost0
if np.isinf(cost):
self.inf_count += 1
@@ -183,6 +186,7 @@ def cost_wrapper(x):
elif self._options["jac"] is True:

def cost_wrapper(x):
self.log["x"].append([x])
L, dl = self.cost.evaluateS1(x)
return L, dl if self.minimising else -L, -dl

@@ -297,10 +301,14 @@ def _run_optimiser(self):
self.x0 = None

# Add callback storing history of parameter values
-def callback(x, convergence):
-    self.log.append([x])
+def callback(intermediate_result: OptimizeResult):
+    self.log["x_best"].append(intermediate_result.x)
+    self.log["cost"].append(
+        intermediate_result.fun if self.minimising else -intermediate_result.fun
+    )

def cost_wrapper(x):
self.log["x"].append([x])
return self.cost(x) if self.minimising else -self.cost(x)

return differential_evolution(
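Both SciPy callbacks now use the newer callback interface: when the callback's sole parameter is named `intermediate_result`, SciPy passes an `OptimizeResult` carrying the current best point `x` and objective value `fun`. To the best of our reading of the SciPy docs, this requires SciPy >= 1.11 for `minimize` and >= 1.12 for `differential_evolution`. A self-contained sketch of the mechanism:

```python
# Standalone sketch of the new-style SciPy callback used in this diff;
# SciPy inspects the parameter name and passes an OptimizeResult.
import numpy as np
from scipy.optimize import OptimizeResult, minimize

history = {"x_best": [], "cost": []}

def callback(intermediate_result: OptimizeResult):
    history["x_best"].append(intermediate_result.x)
    history["cost"].append(intermediate_result.fun)

minimize(lambda x: np.sum((x - 1.0) ** 2), x0=np.zeros(2), callback=callback)
print(f"logged {len(history['cost'])} iterations")
```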
10 changes: 1 addition & 9 deletions pybop/parameters/parameter.py
@@ -167,7 +167,7 @@ def __init__(self, *args):
for param in args:
self.add(param)

-def __getitem__(self, key: str):
+def __getitem__(self, key: str) -> Parameter:
"""
Return the parameter dictionary corresponding to a particular key.
@@ -180,15 +180,7 @@ def __getitem__(self, key: str):
-------
pybop.Parameter
The Parameter object.
-Raises
-------
-ValueError
-    The key must be the name of one of the parameters.
"""
-if key not in self.param.keys():
-    raise ValueError(f"The key {key} is not the name of a parameter.")

return self.param[key]

def __len__(self) -> int:
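With the explicit membership check removed, looking up an unknown name now raises the underlying dict's native `KeyError` rather than the previous `ValueError`. A sketch of the behaviour change; the `Parameter` constructor arguments here are illustrative:

```python
# Sketch: Parameters.__getitem__ after this change.
params = pybop.Parameters(pybop.Parameter("R0", initial_value=0.01))

params["R0"]       # returns the pybop.Parameter object
params["missing"]  # now raises KeyError (previously ValueError)
```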
37 changes: 33 additions & 4 deletions pybop/plotting/plot2d.py
@@ -1,12 +1,19 @@
import sys

import numpy as np
+from scipy.interpolate import griddata

from pybop import BaseOptimiser, Optimisation, PlotlyManager


def plot2d(
-    cost_or_optim, gradient=False, bounds=None, steps=10, show=True, **layout_kwargs
+    cost_or_optim,
+    gradient: bool = False,
+    bounds: np.ndarray = None,
+    steps: int = 10,
+    show: bool = True,
+    use_optim_log: bool = False,
+    **layout_kwargs,
):
"""
Plot a 2D visualisation of a cost landscape using Plotly.
@@ -26,9 +33,11 @@ def plot2d(
A 2x2 array specifying the [min, max] bounds for each parameter. If None, uses
`cost.parameters.get_bounds_for_plotly`.
steps : int, optional
-    The number of intervals to divide the parameter space into along each dimension (default is 10).
+    The number of grid points to divide the parameter space into along each dimension (default: 10).
show : bool, optional
If True, the figure is shown upon creation (default: True).
+use_optim_log : bool, optional
+    If True, the optimisation log is used to shape the cost landscape (default: False).
**layout_kwargs : optional
Valid Plotly layout keys and their values,
e.g. `xaxis_title="Time [s]"` or
@@ -87,6 +96,24 @@
# Append the arrays to the grad_parameter_costs list
grad_parameter_costs.extend(grads)

+elif plot_optim and use_optim_log:
+    # Flatten the cost matrix and parameter values
+    flat_x = np.tile(x, len(y))
+    flat_y = np.repeat(y, len(x))
+    flat_costs = costs.flatten()
+
+    # Append the optimisation trace to the data
+    parameter_log = np.array(optim.log["x_best"])
+    flat_x = np.concatenate((flat_x, parameter_log[:, 0]))
+    flat_y = np.concatenate((flat_y, parameter_log[:, 1]))
+    flat_costs = np.concatenate((flat_costs, optim.log["cost"]))
+
+    # Order the parameter values and estimate the cost using interpolation
+    x = np.unique(flat_x)
+    y = np.unique(flat_y)
+    xf, yf = np.meshgrid(x, y)
+    costs = griddata((flat_x, flat_y), flat_costs, (xf, yf), method="linear")

# Import plotly only when needed
go = PlotlyManager().go

@@ -107,11 +134,13 @@
layout = go.Layout(layout_options)

# Create contour plot and update the layout
-fig = go.Figure(data=[go.Contour(x=x, y=y, z=costs)], layout=layout)
+fig = go.Figure(
+    data=[go.Contour(x=x, y=y, z=costs, connectgaps=True)], layout=layout
+)

if plot_optim:
# Plot the optimisation trace
-optim_trace = np.array([item for sublist in optim.log for item in sublist])
+optim_trace = np.array([item for sublist in optim.log["x"] for item in sublist])
optim_trace = optim_trace.reshape(-1, 2)
fig.add_trace(
go.Scatter(
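The `use_optim_log` branch merges the regular grid of cost samples with the scattered points visited during optimisation, then re-grids everything with `scipy.interpolate.griddata`; `connectgaps=True` lets Plotly bridge any cells the linear interpolation leaves as NaN. A toy illustration of the re-gridding step, with made-up values:

```python
# Toy example of the griddata step: scattered (x, y, cost) samples are
# interpolated back onto a regular grid, as in the branch above.
import numpy as np
from scipy.interpolate import griddata

flat_x = np.array([0.0, 1.0, 0.0, 1.0, 0.5])
flat_y = np.array([0.0, 0.0, 1.0, 1.0, 0.5])
flat_costs = np.array([1.0, 2.0, 2.0, 3.0, 1.5])

xf, yf = np.meshgrid(np.unique(flat_x), np.unique(flat_y))
costs = griddata((flat_x, flat_y), flat_costs, (xf, yf), method="linear")
print(costs)  # 3x3 grid; sampled points are recovered exactly
```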
19 changes: 4 additions & 15 deletions pybop/plotting/plot_convergence.py
@@ -1,7 +1,5 @@
import sys

-import numpy as np

from pybop import StandardPlot


@@ -26,25 +24,16 @@ def plot_convergence(optim, show=True, **layout_kwargs):
The Plotly figure object for the convergence plot.
"""

-# Extract the cost function and log from the optimisation object
-cost = optim.cost
-log = optim.log
-
-# Find the best cost from each iteration
-best_cost_per_iteration = [
-    min((cost(solution) for solution in log_entry), default=np.inf)
-    if optim.minimising
-    else max((cost(solution) for solution in log_entry), default=-np.inf)
-    for log_entry in log
-]
+# Extract log from the optimisation object
+cost_log = optim.log["cost"]

# Generate a list of iteration numbers
-iteration_numbers = list(range(1, len(best_cost_per_iteration) + 1))
+iteration_numbers = list(range(1, len(cost_log) + 1))

# Create a plotting dictionary
plot_dict = StandardPlot(
x=iteration_numbers,
-    y=best_cost_per_iteration,
+    y=cost_log,
layout_options=dict(
xaxis_title="Iteration", yaxis_title="Cost", title="Convergence"
),
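Because the optimiser now records the best cost per iteration itself, the convergence plot simply reads `optim.log["cost"]` instead of re-evaluating the cost at every logged point. Typical usage is unchanged; a minimal sketch:

```python
# Sketch: the convergence plot now reads the stored cost trace directly.
optim = pybop.Optimisation(cost)
optim.run()
fig = pybop.plot_convergence(optim)
```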
4 changes: 2 additions & 2 deletions pybop/plotting/plot_parameters.py
@@ -26,10 +26,10 @@ def plot_parameters(optim, show=True, **layout_kwargs):

# Extract parameters and log from the optimisation object
parameters = optim.cost.parameters
-log = optim.log
+log = optim.log["x"]

# Create a list of sequential integers for the x-axis
-x = list(range(len(log[0]) * len(log)))
+x = list(range(1, len(log[0]) * len(log) + 1))

# Determine the number of elements in the smallest arrays
num_elements = len(log[0][0])
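The parameter traces now read the attempted points from `optim.log["x"]` and use a 1-based x-axis, matching the iteration numbering used by the convergence plot. Usage is unchanged; a minimal sketch:

```python
# Sketch: per-parameter traces over the points tried by the optimiser.
fig = pybop.plot_parameters(optim)
```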
6 changes: 6 additions & 0 deletions tests/unit/test_parameters.py
@@ -113,6 +113,12 @@ def test_parameters_construction(self, parameter):
):
params.add(parameter)

+with pytest.raises(
+    Exception,
+    match="Parameter requires a name.",
+):
+    params.add(dict(value=2))

params.remove(parameter_name=parameter.name)

# Test parameter addition via dict
6 changes: 6 additions & 0 deletions tests/unit/test_plots.py
@@ -125,6 +125,12 @@ def test_optim_plots(self, optim):
# Plot the cost landscape with optimisation path
pybop.plot2d(optim, steps=5)

+# Plot the cost landscape using optimisation path
+pybop.plot2d(optim, steps=5, use_optim_log=True)
+
+# Plot gradient cost landscape
+pybop.plot2d(optim, gradient=True, steps=5)

@pytest.mark.unit
def test_with_ipykernel(self, dataset, cost, optim):
import ipykernel
7 changes: 7 additions & 0 deletions tests/unit/test_problem.py
@@ -99,6 +99,13 @@ def test_base_problem(self, parameters, model, dataset):
match="The input parameters must be a pybop Parameter, a list of pybop.Parameter objects, or a pybop Parameters object.",
):
problem = pybop.BaseProblem(parameters="Invalid string")
+with pytest.raises(
+    TypeError,
+    match="All elements in the list must be pybop.Parameter objects.",
+):
+    problem = pybop.BaseProblem(
+        parameters=[parameter_list[0], "Invalid string"]
+    )

@pytest.mark.unit
def test_fitting_problem(self, parameters, dataset, model, signal):
