diff --git a/CHANGELOG.md b/CHANGELOG.md index e44e092a..e5e93849 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ ## Bug Fixes +- [#338](https://github.com/pybop-team/PyBOP/pull/338) - Fixes GaussianLogLikelihood class, adds integration tests, updates non-bounded parameter implementation by applying bounds from priors and `boundary_multiplier` argument. Bugfixes to CMAES construction. - [#339](https://github.com/pybop-team/PyBOP/issues/339) - Updates the calculation of the cyclable lithium capacity in the spme_max_energy example. - [#387](https://github.com/pybop-team/PyBOP/issues/387) - Adds keys to ParameterSet and updates ECM OCV check. - [#380](https://github.com/pybop-team/PyBOP/pull/380) - Restore self._boundaries construction for `pybop.PSO` diff --git a/examples/scripts/spm_MAP.py b/examples/scripts/spm_MAP.py index dc135fdc..f613eccb 100644 --- a/examples/scripts/spm_MAP.py +++ b/examples/scripts/spm_MAP.py @@ -2,8 +2,16 @@ import pybop -# Define model +# Construct and update initial parameter values parameter_set = pybop.ParameterSet.pybamm("Chen2020") +parameter_set.update( + { + "Negative electrode active material volume fraction": 0.63, + "Positive electrode active material volume fraction": 0.51, + } +) + +# Define model model = pybop.lithium_ion.SPM(parameter_set=parameter_set) # Fitting parameters @@ -12,21 +20,16 @@ "Negative electrode active material volume fraction", prior=pybop.Gaussian(0.6, 0.05), bounds=[0.5, 0.8], + true_value=parameter_set["Negative electrode active material volume fraction"], ), pybop.Parameter( "Positive electrode active material volume fraction", prior=pybop.Gaussian(0.48, 0.05), bounds=[0.4, 0.7], + true_value=parameter_set["Positive electrode active material volume fraction"], ), ) -# Set initial parameter values -parameter_set.update( - { - "Negative electrode active material volume fraction": 0.63, - "Positive electrode active material volume fraction": 0.51, - } -) # Generate data sigma = 0.005 t_eval = np.arange(0, 900, 3) @@ -44,8 +47,8 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) -cost = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma) -optim = pybop.CMAES( +cost = pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0=sigma) +optim = pybop.AdamW( cost, max_unchanged_iterations=20, min_iterations=20, @@ -54,6 +57,7 @@ # Run the optimisation x, final_cost = optim.run() +print("True parameters:", parameters.true_value()) print("Estimated parameters:", x) # Plot the timeseries output diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py index d5d6e641..9c9b3f36 100644 --- a/examples/scripts/spm_MLE.py +++ b/examples/scripts/spm_MLE.py @@ -16,7 +16,6 @@ pybop.Parameter( "Positive electrode active material volume fraction", prior=pybop.Gaussian(0.48, 0.05), - bounds=[0.4, 0.7], ), ) @@ -44,8 +43,8 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) -likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=[0.03, 0.03]) -optim = pybop.CMAES( +likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma0=sigma) +optim = pybop.IRPropMin( likelihood, max_unchanged_iterations=20, min_iterations=20, @@ -57,7 +56,7 @@ print("Estimated parameters:", x) # Plot the timeseries output -pybop.quick_plot(problem, problem_inputs=x[0:2], title="Optimised Comparison") +pybop.quick_plot(problem, problem_inputs=x, title="Optimised Comparison") # Plot 
convergence pybop.plot_convergence(optim) diff --git a/pybop/__init__.py b/pybop/__init__.py index 61971e95..7b3d77be 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -86,7 +86,6 @@ RootMeanSquaredError, SumSquaredError, ObserverCost, - MAP, ) from .costs.design_costs import ( DesignCost, @@ -97,6 +96,7 @@ BaseLikelihood, GaussianLogLikelihood, GaussianLogLikelihoodKnownSigma, + MAP, ) # diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 9406572b..6d5edb38 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -1,7 +1,11 @@ +from typing import List, Tuple, Union + import numpy as np from pybop.costs.base_cost import BaseCost -from pybop.parameters.parameter import Inputs +from pybop.parameters.parameter import Inputs, Parameter, Parameters +from pybop.parameters.priors import Uniform +from pybop.problems.base_problem import BaseProblem class BaseLikelihood(BaseCost): @@ -9,7 +13,7 @@ class BaseLikelihood(BaseCost): Base class for likelihoods """ - def __init__(self, problem): + def __init__(self, problem: BaseProblem): super(BaseLikelihood, self).__init__(problem) self.n_time_data = problem.n_time_data @@ -22,92 +26,74 @@ class GaussianLogLikelihoodKnownSigma(BaseLikelihood): Parameters ---------- - sigma : scalar or array + sigma0 : scalar or array Initial standard deviation around ``x0``. Either a scalar value (one standard deviation for all coordinates) or an array with one entry - per dimension. Not all methods will use this information. + per dimension. """ - def __init__(self, problem, sigma): + def __init__(self, problem: BaseProblem, sigma0: Union[List[float], float]): super(GaussianLogLikelihoodKnownSigma, self).__init__(problem) - self.sigma = None - self.set_sigma(sigma) - self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi / self.sigma) - self._multip = -1 / (2.0 * self.sigma**2) - self.sigma2 = self.sigma**-2 + sigma0 = self.check_sigma0(sigma0) + self.sigma2 = sigma0**2.0 + self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi * self.sigma2) + self._multip = -1 / (2.0 * self.sigma2) self._dl = np.ones(self.n_parameters) - def set_sigma(self, sigma): - """ - Setter for sigma parameter - """ - if sigma is None: - raise ValueError( - "The GaussianLogLikelihoodKnownSigma cost requires sigma to be " - + "either a scalar value or an array with one entry per dimension." - ) - - if not isinstance(sigma, np.ndarray): - sigma = np.asarray(sigma) - - if not np.issubdtype(sigma.dtype, np.number): - raise ValueError("Sigma must contain only numeric values") - - if np.any(sigma <= 0): - raise ValueError("Sigma must be positive") - else: - self.sigma = sigma - - def get_sigma(self): + def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float: """ - Getter for sigma parameter - """ - return self.sigma - - def _evaluate(self, inputs: Inputs, grad=None): - """ - Calls the problem.evaluate method and calculates - the log-likelihood + Evaluates the Gaussian log-likelihood for the given parameters with known sigma. 
""" y = self.problem.evaluate(inputs) + if any( + len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + ): + return -np.inf # prediction length doesn't match target - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): - return -np.float64(np.inf) # prediction doesn't match target - - e = np.asarray( + e = np.sum( [ np.sum( self._offset - + self._multip * np.sum((self._target[signal] - y[signal]) ** 2) + + self._multip * np.sum((self._target[signal] - y[signal]) ** 2.0) ) for signal in self.signal ] ) - if self.n_outputs == 1: - return e.item() - else: - return np.sum(e) + return e if self.n_outputs != 1 else e.item() - def _evaluateS1(self, inputs: Inputs, grad=None): + def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: """ - Calls the problem.evaluateS1 method and calculates - the log-likelihood + Calls the problem.evaluateS1 method and calculates the log-likelihood and gradient. """ y, dy = self.problem.evaluateS1(inputs) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): - likelihood = np.float64(np.inf) - dl = self._dl * np.ones(self.n_parameters) - return -likelihood, -dl + if any( + len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + ): + return -np.inf, -self._dl - r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) likelihood = self._evaluate(inputs) - dl = np.sum((self.sigma2 * np.sum((r * dy.T), axis=2)), axis=1) + + r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) + dl = np.sum((np.sum((r * dy.T), axis=2) / self.sigma2), axis=1) + return likelihood, dl + def check_sigma0(self, sigma0: Union[np.ndarray, float]): + """ + Check the validity of sigma0. + """ + sigma0 = np.asarray(sigma0, dtype=float) + if not np.all(sigma0 > 0): + raise ValueError("Sigma0 must be positive") + if np.shape(sigma0) not in [(), (1,), (self.n_outputs,)]: + raise ValueError( + "sigma0 must be either a scalar value (one standard deviation for " + + "all coordinates) or an array with one entry per dimension." + ) + return sigma0 + class GaussianLogLikelihood(BaseLikelihood): """ @@ -115,18 +101,79 @@ class GaussianLogLikelihood(BaseLikelihood): data follows a Gaussian distribution and computes the log-likelihood of observed data under this assumption. + This class estimates the standard deviation of the Gaussian distribution + alongside the parameters of the model. + Attributes ---------- _logpi : float Precomputed offset value for the log-likelihood function. + _dsigma_scale : float + Scale factor for derivative of standard deviation. 
""" - def __init__(self, problem): + def __init__( + self, + problem: BaseProblem, + sigma0: Union[float, List[float], List[Parameter]] = 0.002, + dsigma_scale: float = 1.0, + ): super(GaussianLogLikelihood, self).__init__(problem) + self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) - self._dl = np.ones(self.n_parameters + self.n_outputs) - def _evaluate(self, inputs: Inputs, grad=None): + self.sigma = Parameters() + self._add_sigma_parameters(sigma0) + self.parameters.join(self.sigma) + self._dl = np.ones(self.n_parameters) + + def _add_sigma_parameters(self, sigma0): + sigma0 = [sigma0] if not isinstance(sigma0, List) else sigma0 + sigma0 = self._pad_sigma0(sigma0) + + for i, value in enumerate(sigma0): + self._add_single_sigma(i, value) + + def _pad_sigma0(self, sigma0): + if len(sigma0) < self.n_outputs: + return np.pad( + sigma0, + (0, self.n_outputs - len(sigma0)), + constant_values=sigma0[-1], + ) + return sigma0 + + def _add_single_sigma(self, index, value): + if isinstance(value, Parameter): + self.sigma.add(value) + elif isinstance(value, (int, float)): + self.sigma.add( + Parameter( + f"Sigma for output {index+1}", + initial_value=value, + prior=Uniform(0.5 * value, 1.5 * value), + ) + ) + else: + raise TypeError( + f"Expected sigma0 to contain Parameter objects or numeric values. " + f"Received {type(value)}" + ) + + @property + def dsigma_scale(self): + """ + Scaling factor for the dsigma term in the gradient calculation. + """ + return self._dsigma_scale + + @dsigma_scale.setter + def dsigma_scale(self, new_value): + if new_value < 0: + raise ValueError("dsigma_scale must be non-negative") + self._dsigma_scale = new_value + + def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float: """ Evaluates the Gaussian log-likelihood for the given parameters. @@ -136,56 +183,175 @@ def _evaluate(self, inputs: Inputs, grad=None): The parameters for which to evaluate the log-likelihood, including the `n_outputs` standard deviations of the Gaussian distributions. - Returns: - float: The log-likelihood value, or -inf if the standard deviations are received as non-positive. + Returns + ------- + float + The log-likelihood value, or -inf if the standard deviations are non-positive. """ - sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND (replace in #338) + self.parameters.update(values=list(inputs.values())) + sigma = self.sigma.current_value() if np.any(sigma <= 0): return -np.inf - y = self.problem.evaluate(inputs) - - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): - return -np.float64(np.inf) # prediction doesn't match target + y = self.problem.evaluate(self.problem.parameters.as_dict()) + if any( + len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + ): + return -np.inf # prediction length doesn't match target - e = np.asarray( + e = np.sum( [ np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum((self._target[signal] - y[signal]) ** 2) / (2.0 * sigma**2) + - np.sum((self._target[signal] - y[signal]) ** 2.0) + / (2.0 * sigma**2.0) ) for signal in self.signal ] ) - if self.n_outputs == 1: - return e.item() - else: - return np.sum(e) + return e if self.n_outputs != 1 else e.item() - def _evaluateS1(self, inputs: Inputs, grad=None): + def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: """ - Calls the problem.evaluateS1 method and calculates - the log-likelihood + Calls the problem.evaluateS1 method and calculates the log-likelihood. 
+ + Parameters + ---------- + inputs : Inputs + The parameters for which to evaluate the log-likelihood. + + Returns + ------- + Tuple[float, np.ndarray] + The log-likelihood and its gradient. """ - sigma = np.asarray([0.002]) # TEMPORARY WORKAROUND (replace in #338) + self.parameters.update(values=list(inputs.values())) + sigma = self.sigma.current_value() if np.any(sigma <= 0): - return -np.float64(np.inf), -self._dl * np.ones(self.n_parameters) + return -np.inf, -self._dl - y, dy = self.problem.evaluateS1(inputs) - for key in self.signal: - if len(y.get(key, [])) != len(self._target.get(key, [])): - likelihood = np.float64(np.inf) - dl = self._dl * np.ones(self.n_parameters) - return -likelihood, -dl + y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) + if any( + len(y.get(key, [])) != len(self._target.get(key, [])) for key in self.signal + ): + return -np.inf, -self._dl - r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) likelihood = self._evaluate(inputs) - dl = sigma ** (-2.0) * np.sum((r * dy.T), axis=2) - dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=1) + + r = np.asarray([self._target[signal] - y[signal] for signal in self.signal]) + dl = np.sum((np.sum((r * dy.T), axis=2) / (sigma**2.0)), axis=1) + dsigma = ( + -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0) + ) / self._dsigma_scale dl = np.concatenate((dl.flatten(), dsigma)) + return likelihood, dl + + +class MAP(BaseLikelihood): + """ + Maximum a posteriori cost function. + + Computes the maximum a posteriori cost function, which is the sum of the + log likelihood and the log prior. The goal of maximising is achieved by + setting minimising = False in the optimiser settings. + + Inherits all parameters and attributes from ``BaseLikelihood``. + + """ + + def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-2): + super(MAP, self).__init__(problem) + self.sigma0 = sigma0 + self.gradient_step = gradient_step + if self.sigma0 is None: + self.sigma0 = [] + for param in self.problem.parameters: + self.sigma0.append(param.prior.sigma) + + try: + self.likelihood = likelihood(problem=self.problem, sigma0=self.sigma0) + except Exception as e: + raise ValueError( + f"An error occurred when constructing the Likelihood class: {e}" + ) + + if hasattr(self, "likelihood") and not isinstance( + self.likelihood, BaseLikelihood + ): + raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") + + def _evaluate(self, inputs: Inputs, grad=None) -> float: + """ + Calculate the maximum a posteriori cost for a given set of parameters. + + Parameters + ---------- + inputs : Inputs + The parameters for which to evaluate the cost. + grad : array-like, optional + An array to store the gradient of the cost function with respect + to the parameters. + + Returns + ------- + float + The maximum a posteriori cost. + """ + log_likelihood = self.likelihood._evaluate(inputs) + log_prior = sum( + self.parameters[key].prior.logpdf(value) for key, value in inputs.items() + ) + + posterior = log_likelihood + log_prior + return posterior + + def _evaluateS1(self, inputs: Inputs) -> Tuple[float, np.ndarray]: + """ + Compute the maximum a posteriori with respect to the parameters. + The method passes the likelihood gradient to the optimiser without modification. + + Parameters + ---------- + inputs : Inputs + The parameters for which to compute the cost and gradient. + + Returns + ------- + tuple + A tuple containing the cost and the gradient. 
The cost is a float, + and the gradient is an array-like of the same length as `x`. + + Raises + ------ + ValueError + If an error occurs during the calculation of the cost or gradient. + """ + log_likelihood, dl = self.likelihood._evaluateS1(inputs) + log_prior = sum( + self.parameters[key].prior.logpdf(value) for key, value in inputs.items() + ) + + # Compute a finite difference approximation of the gradient of the log prior + delta = self.parameters.initial_value() * self.gradient_step + prior_gradient = [] + + for parameter, step_size in zip(self.problem.parameters, delta): + param_value = inputs[parameter.name] + + log_prior_upper = parameter.prior.logpdf(param_value * (1 + step_size)) + log_prior_lower = parameter.prior.logpdf(param_value * (1 - step_size)) + + gradient = (log_prior_upper - log_prior_lower) / ( + 2 * step_size * param_value + np.finfo(float).eps + ) + prior_gradient.append(gradient) + + posterior = log_likelihood + log_prior + total_gradient = dl + prior_gradient + + return posterior, total_gradient diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 659e3f7f..b5ad603c 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -27,7 +27,7 @@ def __init__(self, problem=None): self.problem = problem if isinstance(self.problem, BaseProblem): self._target = self.problem._target - self.parameters = self.problem.parameters + self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 3cb57ec9..6315c4b6 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -1,6 +1,5 @@ import numpy as np -from pybop.costs._likelihoods import BaseLikelihood from pybop.costs.base_cost import BaseCost from pybop.observers.observer import Observer from pybop.parameters.parameter import Inputs @@ -278,90 +277,3 @@ def evaluateS1(self, inputs: Inputs): If an error occurs during the calculation of the cost or gradient. """ raise NotImplementedError - - -class MAP(BaseLikelihood): - """ - Maximum a posteriori cost function. - - Computes the maximum a posteriori cost function, which is the sum of the - log likelihood and the log prior. The goal of maximising is achieved by - setting minimising = False in the optimiser settings. - - Inherits all parameters and attributes from ``BaseLikelihood``. - - """ - - def __init__(self, problem, likelihood, sigma=None): - super(MAP, self).__init__(problem) - self.sigma0 = sigma - if self.sigma0 is None: - self.sigma0 = [] - for param in self.problem.parameters: - self.sigma0.append(param.prior.sigma) - - try: - self.likelihood = likelihood(problem=self.problem, sigma=self.sigma0) - except Exception as e: - raise ValueError( - f"An error occurred when constructing the Likelihood class: {e}" - ) - - if hasattr(self, "likelihood") and not isinstance( - self.likelihood, BaseLikelihood - ): - raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") - - def _evaluate(self, inputs: Inputs, grad=None): - """ - Calculate the maximum a posteriori cost for a given set of parameters. - - Parameters - ---------- - inputs : Inputs - The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. - - Returns - ------- - float - The maximum a posteriori cost. 
- """ - log_likelihood = self.likelihood._evaluate(inputs) - log_prior = sum( - self.parameters[key].prior.logpdf(value) for key, value in inputs.items() - ) - - posterior = log_likelihood + log_prior - return posterior - - def _evaluateS1(self, inputs: Inputs): - """ - Compute the maximum a posteriori with respect to the parameters. - The method passes the likelihood gradient to the optimiser without modification. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost and gradient. - - Returns - ------- - tuple - A tuple containing the cost and the gradient. The cost is a float, - and the gradient is an array-like of the same length as `x`. - - Raises - ------ - ValueError - If an error occurs during the calculation of the cost or gradient. - """ - log_likelihood, dl = self.likelihood._evaluateS1(inputs) - log_prior = sum( - self.parameters[key].prior.logpdf(inputs[key]) for key in inputs.keys() - ) - - posterior = log_likelihood + log_prior - return posterior, dl diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py index a016bbc6..e9ba3d6c 100644 --- a/pybop/models/base_model.py +++ b/pybop/models/base_model.py @@ -73,10 +73,6 @@ def __init__(self, name="Base Model", parameter_set=None): self.param_check_counter = 0 self.allow_infeasible_solutions = True - @property - def n_parameters(self): - return len(self.parameters) - def build( self, dataset: Dataset = None, @@ -222,8 +218,8 @@ def rebuild( The initial state of charge to be used in simulations. """ self.dataset = dataset + if parameters is not None: - self.parameters = parameters self.classify_and_update_parameters(parameters) if init_soc is not None: @@ -242,7 +238,7 @@ def rebuild( # Clear solver and setup model self._solver._model_set_up = {} - def classify_and_update_parameters(self, parameters: Union[Parameters, Dict]): + def classify_and_update_parameters(self, parameters: Parameters): """ Update the parameter values according to their classification as either 'rebuild_parameters' which require a model rebuild and @@ -250,10 +246,16 @@ def classify_and_update_parameters(self, parameters: Union[Parameters, Dict]): Parameters ---------- - parameters : pybop.ParameterSet + parameters : pybop.Parameters """ - parameter_dictionary = parameters.as_dict() + if parameters is None: + self.parameters = Parameters() + else: + self.parameters = parameters + + parameter_dictionary = self.parameters.as_dict() + rebuild_parameters = { param: parameter_dictionary[param] for param in parameter_dictionary @@ -274,6 +276,9 @@ def classify_and_update_parameters(self, parameters: Union[Parameters, Dict]): self._unprocessed_parameter_set = self._parameter_set self.geometry = self.pybamm_model.default_geometry + # Update the list of parameter names and number of parameters + self._n_parameters = len(self.parameters) + def reinit( self, inputs: Inputs, t: float = 0.0, x: Optional[np.ndarray] = None ) -> TimeSeriesState: @@ -425,7 +430,7 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array): ( sol[self.signal[0]].data.shape[0], self.n_outputs, - self.n_parameters, + self._n_parameters, ) ) diff --git a/pybop/models/empirical/ecm.py b/pybop/models/empirical/ecm.py index d2d97d6d..893b30bc 100644 --- a/pybop/models/empirical/ecm.py +++ b/pybop/models/empirical/ecm.py @@ -51,7 +51,7 @@ def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True): Parameters ---------- - inputs : Dict + inputs : Inputs The input parameters for the simulation. 
allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). diff --git a/pybop/models/lithium_ion/base_echem.py b/pybop/models/lithium_ion/base_echem.py index 523d5fb0..fd4aa268 100644 --- a/pybop/models/lithium_ion/base_echem.py +++ b/pybop/models/lithium_ion/base_echem.py @@ -1,11 +1,8 @@ import warnings -from typing import Dict from pybamm import lithium_ion as pybamm_lithium_ion -from pybop.models.base_model import BaseModel - -Inputs = Dict[str, float] +from pybop.models.base_model import BaseModel, Inputs class EChemBaseModel(BaseModel): diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index ba433063..4693468f 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -160,8 +160,7 @@ def run(self): # Store the optimised parameters x = self.result.x - if hasattr(self.cost, "parameters"): - self.store_optimised_parameters(x) + self.parameters.update(values=x) # Check if parameters are viable if self.physical_viability: diff --git a/pybop/optimisers/pints_optimisers.py b/pybop/optimisers/pints_optimisers.py index 2f99e5ef..5d616756 100644 --- a/pybop/optimisers/pints_optimisers.py +++ b/pybop/optimisers/pints_optimisers.py @@ -268,8 +268,8 @@ class CMAES(BasePintsOptimiser): """ def __init__(self, cost, **optimiser_kwargs): - x0 = optimiser_kwargs.pop("x0", cost.parameters.initial_value()) - if x0 is not None and len(x0) == 1: + x0 = optimiser_kwargs.get("x0", cost.parameters.initial_value()) + if len(x0) == 1 or len(cost.parameters) == 1: raise ValueError( "CMAES requires optimisation of >= 2 parameters at once. " + "Please choose another optimiser." diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index e1a828af..67f1896d 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -127,7 +127,7 @@ def set_margin(self, margin): self.margin = margin - def set_bounds(self, bounds=None): + def set_bounds(self, bounds=None, boundary_multiplier=6): """ Set the upper and lower bounds. @@ -136,6 +136,9 @@ def set_bounds(self, bounds=None): bounds : tuple, optional A tuple defining the lower and upper bounds for the parameter. Defaults to None. + boundary_multiplier : float, optional + Used to define the bounds when no bounds are passed but the parameter has + a prior distribution (default: 6). Raises ------ @@ -149,9 +152,24 @@ def set_bounds(self, bounds=None): else: self.lower_bound = bounds[0] self.upper_bound = bounds[1] + elif self.prior is not None: + self.lower_bound = self.prior.mean - boundary_multiplier * self.prior.sigma + self.upper_bound = self.prior.mean + boundary_multiplier * self.prior.sigma + bounds = [self.lower_bound, self.upper_bound] + print("Default bounds applied based on prior distribution.") self.bounds = bounds + def get_initial_value(self) -> float: + """ + Return the initial value of each parameter. + """ + if self.initial_value is None: + sample = self.rvs(1) + self.update(initial_value=sample[0]) + + return self.initial_value + class Parameters: """ @@ -351,7 +369,7 @@ def get_sigma0(self) -> List: return sigma0 - def initial_value(self) -> List: + def initial_value(self) -> np.ndarray: """ Return the initial value of each parameter. 
""" @@ -363,9 +381,9 @@ def initial_value(self) -> List: param.update(initial_value=initial_value) initial_values.append(param.initial_value) - return initial_values + return np.asarray(initial_values) - def current_value(self) -> List: + def current_value(self) -> np.ndarray: """ Return the current value of each parameter. """ @@ -374,9 +392,9 @@ def current_value(self) -> List: for param in self.param.values(): current_values.append(param.value) - return current_values + return np.asarray(current_values) - def true_value(self) -> List: + def true_value(self) -> np.ndarray: """ Return the true value of each parameter. """ @@ -385,7 +403,7 @@ def true_value(self) -> List: for param in self.param.values(): true_values.append(param.true_value) - return true_values + return np.asarray(true_values) def get_bounds_for_plotly(self): """ diff --git a/pybop/plotting/plot2d.py b/pybop/plotting/plot2d.py index 961bc7c4..ee8d7057 100644 --- a/pybop/plotting/plot2d.py +++ b/pybop/plotting/plot2d.py @@ -1,4 +1,5 @@ import sys +import warnings import numpy as np from scipy.interpolate import griddata @@ -63,6 +64,24 @@ def plot2d( cost = cost_or_optim plot_optim = False + if len(cost.parameters) < 2: + raise ValueError("This cost function takes fewer than 2 parameters.") + + additional_values = [] + if len(cost.parameters) > 2: + warnings.warn( + "This cost function requires more than 2 parameters. " + + "Plotting in 2d with fixed values for the additional parameters.", + UserWarning, + ) + for ( + i, + param, + ) in enumerate(cost.parameters): + if i > 1: + additional_values.append(param.value) + print(f"Fixed {param.name}:", param.value) + # Set up parameter bounds if bounds is None: bounds = cost.parameters.get_bounds_for_plotly() @@ -77,19 +96,23 @@ def plot2d( # Populate cost matrix for i, xi in enumerate(x): for j, yj in enumerate(y): - costs[j, i] = cost(np.asarray([xi, yj])) + costs[j, i] = cost(np.asarray([xi, yj] + additional_values)) if gradient: grad_parameter_costs = [] # Determine the number of gradient outputs from cost.evaluateS1 - num_gradients = len(cost.evaluateS1(np.asarray([x[0], y[0]]))[1]) + num_gradients = len( + cost.evaluateS1(np.asarray([x[0], y[0]] + additional_values))[1] + ) # Create an array to hold each gradient output & populate grads = [np.zeros((len(y), len(x))) for _ in range(num_gradients)] for i, xi in enumerate(x): for j, yj in enumerate(y): - (*current_grads,) = cost.evaluateS1(np.asarray([xi, yj]))[1] + (*current_grads,) = cost.evaluateS1( + np.asarray([xi, yj] + additional_values) + )[1] for k, grad_output in enumerate(current_grads): grads[k][j, i] = grad_output @@ -141,7 +164,7 @@ def plot2d( if plot_optim: # Plot the optimisation trace optim_trace = np.asarray( - [item for sublist in optim.log["x"] for item in sublist] + [item[:2] for sublist in optim.log["x"] for item in sublist] ) optim_trace = optim_trace.reshape(-1, 2) fig.add_trace( diff --git a/pybop/plotting/plot_parameters.py b/pybop/plotting/plot_parameters.py index bc1f9a7a..94cb71cb 100644 --- a/pybop/plotting/plot_parameters.py +++ b/pybop/plotting/plot_parameters.py @@ -1,6 +1,6 @@ import sys -from pybop import StandardSubplot +from pybop import GaussianLogLikelihood, StandardSubplot def plot_parameters(optim, show=True, **layout_kwargs): @@ -51,6 +51,10 @@ def plot_parameters(optim, show=True, **layout_kwargs): for name in trace_names: axis_titles.append(("Function Call", name)) + if isinstance(optim.cost, GaussianLogLikelihood): + axis_titles.append(("Function Call", "Sigma")) + 
trace_names.append("Sigma") + # Set subplot layout options layout_options = dict( title="Parameter Convergence", diff --git a/pybop/problems/design_problem.py b/pybop/problems/design_problem.py index b99a9357..a1efa22f 100644 --- a/pybop/problems/design_problem.py +++ b/pybop/problems/design_problem.py @@ -55,7 +55,7 @@ def __init__( # Build the model if required if experiment is not None: # Leave the build until later to apply the experiment - self._model.parameters = self.parameters + self._model.classify_and_update_parameters(self.parameters) elif self._model._built_model is None: self._model.build( diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py index a196ac67..47782a03 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -67,7 +67,7 @@ def spm_costs(self, model, parameters, cost_class): # Define the cost to optimise problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: - return cost_class(problem, sigma=[0.03, 0.03]) + return cost_class(problem, sigma0=0.002) else: return cost_class(problem) @@ -107,7 +107,7 @@ def test_optimisation_f_guessed(self, f_guessed, spm_costs): np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x, init_soc): - model.parameters = parameters + model.classify_and_update_parameters(parameters) experiment = pybop.Experiment( [ ( diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 20fdee0e..66a58638 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -11,6 +11,7 @@ class Test_SPM_Parameterisation: @pytest.fixture(autouse=True) def setup(self): + self.sigma0 = 0.002 self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( loc=0.0, scale=0.05, size=2 ) @@ -42,6 +43,7 @@ def init_soc(self, request): @pytest.fixture( params=[ pybop.GaussianLogLikelihoodKnownSigma, + pybop.GaussianLogLikelihood, pybop.RootMeanSquaredError, pybop.SumSquaredError, pybop.MAP, @@ -62,17 +64,19 @@ def spm_costs(self, model, parameters, cost_class, init_soc): "Time [s]": solution["Time [s]"].data, "Current function [A]": solution["Current [A]"].data, "Voltage [V]": solution["Voltage [V]"].data - + self.noise(0.002, len(solution["Time [s]"].data)), + + self.noise(self.sigma0, len(solution["Time [s]"].data)), } ) # Define the cost to optimise problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: - return cost_class(problem, sigma=[0.03, 0.03]) + return cost_class(problem, sigma0=self.sigma0) + elif cost_class in [pybop.GaussianLogLikelihood]: + return cost_class(problem, sigma0=self.sigma0 * 4) # Initial sigma0 guess elif cost_class in [pybop.MAP]: return cost_class( - problem, pybop.GaussianLogLikelihoodKnownSigma, sigma=[0.03, 0.03] + problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0=self.sigma0 ) else: return cost_class(problem) @@ -92,37 +96,48 @@ def spm_costs(self, model, parameters, cost_class, init_soc): @pytest.mark.integration def test_spm_optimisers(self, optimiser, spm_costs): x0 = spm_costs.parameters.initial_value() - # Some optimisers require a complete set of bounds - if optimiser in [ - pybop.SciPyDifferentialEvolution, - ]: - spm_costs.problem.parameters[ - "Positive electrode active material volume 
fraction" - ].set_bounds([0.375, 0.725]) # Large range to ensure IC within bounds - bounds = spm_costs.problem.parameters.get_bounds() - spm_costs.problem.bounds = bounds - spm_costs.bounds = bounds + common_args = { + "cost": spm_costs, + "max_iterations": 250, + } - # Test each optimiser - if optimiser in [pybop.PSO]: - optim = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.05, max_iterations=250 + # Add sigma0 to ground truth for GaussianLogLikelihood + if isinstance(spm_costs, pybop.GaussianLogLikelihood): + self.ground_truth = np.concatenate( + (self.ground_truth, np.asarray([self.sigma0])) ) - else: - optim = optimiser(cost=spm_costs, sigma0=0.05, max_iterations=250) + + # Set sigma0 and create optimiser + sigma0 = 0.01 if isinstance(spm_costs, pybop.GaussianLogLikelihood) else 0.05 + optim = optimiser(sigma0=sigma0, **common_args) + + # Set max unchanged iterations for BasePintsOptimisers if issubclass(optimiser, pybop.BasePintsOptimiser): - optim.set_max_unchanged_iterations(iterations=35, absolute_tolerance=1e-5) + optim.set_max_unchanged_iterations(iterations=45, absolute_tolerance=1e-5) + + # AdamW will use lowest sigma0 for learning rate, so allow more iterations + if issubclass(optimiser, (pybop.AdamW, pybop.IRPropMin)) and isinstance( + spm_costs, pybop.GaussianLogLikelihood + ): + optim = optimiser(max_unchanged_iterations=75, **common_args) initial_cost = optim.cost(x0) x, final_cost = optim.run() # Assertions - if not np.allclose(x0, self.ground_truth, atol=1e-5): - if optim.minimising: - assert initial_cost > final_cost - else: - assert initial_cost < final_cost - np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + if np.allclose(x0, self.ground_truth, atol=1e-5): + raise AssertionError("Initial guess is too close to ground truth") + + if isinstance(spm_costs, pybop.GaussianLogLikelihood): + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + np.testing.assert_allclose(x[-1], self.sigma0, atol=5e-4) + else: + assert ( + (initial_cost > final_cost) + if optim.minimising + else (initial_cost < final_cost) + ) + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) @pytest.fixture def spm_two_signal_cost(self, parameters, model, cost_class): @@ -134,11 +149,11 @@ def spm_two_signal_cost(self, parameters, model, cost_class): "Time [s]": solution["Time [s]"].data, "Current function [A]": solution["Current [A]"].data, "Voltage [V]": solution["Voltage [V]"].data - + self.noise(0.002, len(solution["Time [s]"].data)), + + self.noise(self.sigma0, len(solution["Time [s]"].data)), "Bulk open-circuit voltage [V]": solution[ "Bulk open-circuit voltage [V]" ].data - + self.noise(0.002, len(solution["Time [s]"].data)), + + self.noise(self.sigma0, len(solution["Time [s]"].data)), } ) @@ -149,9 +164,11 @@ def spm_two_signal_cost(self, parameters, model, cost_class): ) if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: - return cost_class(problem, sigma=[0.05, 0.05]) + return cost_class(problem, sigma0=self.sigma0) elif cost_class in [pybop.MAP]: - return cost_class(problem, pybop.GaussianLogLikelihoodKnownSigma) + return cost_class( + problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0=self.sigma0 + ) else: return cost_class(problem) @@ -160,20 +177,13 @@ def spm_two_signal_cost(self, parameters, model, cost_class): [ pybop.SciPyDifferentialEvolution, pybop.IRPropMin, - pybop.XNES, + pybop.CMAES, ], ) @pytest.mark.integration def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): x0 = 
spm_two_signal_cost.parameters.initial_value() - # Some optimisers require a complete set of bounds - if multi_optimiser in [pybop.SciPyDifferentialEvolution]: - spm_two_signal_cost.problem.parameters[ - "Positive electrode active material volume fraction" - ].set_bounds([0.375, 0.725]) # Large range to ensure IC within bounds - bounds = spm_two_signal_cost.problem.parameters.get_bounds() - spm_two_signal_cost.problem.bounds = bounds - spm_two_signal_cost.bounds = bounds + combined_sigma0 = np.asarray([self.sigma0, self.sigma0]) # Test each optimiser optim = multi_optimiser( @@ -181,6 +191,11 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): sigma0=0.03, max_iterations=250, ) + + # Add sigma0 to ground truth for GaussianLogLikelihood + if isinstance(spm_two_signal_cost, pybop.GaussianLogLikelihood): + self.ground_truth = np.concatenate((self.ground_truth, combined_sigma0)) + if issubclass(multi_optimiser, pybop.BasePintsOptimiser): optim.set_max_unchanged_iterations(iterations=35, absolute_tolerance=1e-5) @@ -188,12 +203,19 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): x, final_cost = optim.run() # Assertions - if not np.allclose(x0, self.ground_truth, atol=1e-5): - if optim.minimising: - assert initial_cost > final_cost - else: - assert initial_cost < final_cost - np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + if np.allclose(x0, self.ground_truth, atol=1e-5): + raise AssertionError("Initial guess is too close to ground truth") + + if isinstance(spm_two_signal_cost, pybop.GaussianLogLikelihood): + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + np.testing.assert_allclose(x[-2:], combined_sigma0, atol=5e-4) + else: + assert ( + (initial_cost > final_cost) + if optim.minimising + else (initial_cost < final_cost) + ) + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) @pytest.mark.parametrize("init_soc", [0.4, 0.6]) @pytest.mark.integration @@ -235,15 +257,14 @@ def test_model_misparameterisation(self, parameters, model, init_soc): np.testing.assert_allclose(x, self.ground_truth, atol=2e-2) def get_data(self, model, parameters, x, init_soc): - model.parameters = parameters + model.classify_and_update_parameters(parameters) experiment = pybop.Experiment( [ ( - "Discharge at 0.5C for 6 minutes (4 second period)", - "Charge at 0.5C for 6 minutes (4 second period)", + "Discharge at 0.5C for 3 minutes (4 second period)", + "Charge at 0.5C for 3 minutes (4 second period)", ), ] - * 2 ) sim = model.predict(init_soc=init_soc, experiment=experiment, inputs=x) return sim diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 45df6ba4..98dde5cb 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -93,7 +93,7 @@ def test_optimisers_on_simple_model(self, optimiser, cost): np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x): - model.parameters = parameters + model.classify_and_update_parameters(parameters) experiment = pybop.Experiment( [ ( diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index e09d3cc4..80e44bea 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -9,6 +9,11 @@ class TestCosts: Class for tests cost functions """ + # Define an invalid likelihood class for MAP tests + class InvalidLikelihood: + def __init__(self, problem, sigma0): + pass + @pytest.fixture def model(self): 
return pybop.lithium_ion.SPM() @@ -131,12 +136,22 @@ def _evaluateS1(self, inputs): @pytest.mark.unit def test_MAP(self, problem): # Incorrect likelihood - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match="An error occurred when constructing the Likelihood class:", + ): pybop.MAP(problem, pybop.SumSquaredError) # Incorrect construction of likelihood - with pytest.raises(ValueError): - pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma, sigma="string") + with pytest.raises( + ValueError, + match="An error occurred when constructing the Likelihood class: could not convert string to float: 'string'", + ): + pybop.MAP(problem, pybop.GaussianLogLikelihoodKnownSigma, sigma0="string") + + # Incorrect likelihood + with pytest.raises(ValueError, match="must be a subclass of BaseLikelihood"): + pybop.MAP(problem, self.InvalidLikelihood, sigma0=0.1) @pytest.mark.unit def test_costs(self, cost): diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index b99aa5d0..aa68cc0e 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -88,21 +88,22 @@ def test_base_likelihood_call_raises_not_implemented_error( likelihood(np.array([0.5, 0.5])) @pytest.mark.unit - def test_set_get_sigma(self, one_signal_problem): - likelihood = pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, 0.1) - likelihood.set_sigma(np.array([0.3])) - assert np.array_equal(likelihood.get_sigma(), np.array([0.3])) - + def test_likelihood_check_sigma0(self, one_signal_problem): with pytest.raises( ValueError, - match="The GaussianLogLikelihoodKnownSigma cost requires sigma to be " - + "either a scalar value or an array with one entry per dimension.", + match="Sigma0 must be positive", ): - pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, sigma=None) + pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, sigma0=None) likelihood = pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, 0.1) - with pytest.raises(ValueError): - likelihood.set_sigma(np.array([-0.2])) + sigma = likelihood.check_sigma0(0.2) + assert sigma == np.array(0.2) + + with pytest.raises( + ValueError, + match=r"sigma0 must be either a scalar value", + ): + pybop.GaussianLogLikelihoodKnownSigma(one_signal_problem, sigma0=[0.2, 0.3]) @pytest.mark.unit def test_base_likelihood_n_parameters_property(self, one_signal_problem): @@ -116,7 +117,7 @@ def test_base_likelihood_n_parameters_property(self, one_signal_problem): def test_gaussian_log_likelihood_known_sigma(self, problem_name, request): problem = request.getfixturevalue(problem_name) likelihood = pybop.GaussianLogLikelihoodKnownSigma( - problem, sigma=np.array([1.0]) + problem, sigma0=np.array([1.0]) ) result = likelihood(np.array([0.5])) grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5])) @@ -131,7 +132,31 @@ def test_gaussian_log_likelihood(self, one_signal_problem): grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5])) assert isinstance(result, float) np.testing.assert_allclose(result, grad_result, atol=1e-5) - assert grad_likelihood[0] <= 0 # TEMPORARY WORKAROUND (Remove in #338) + assert np.all(grad_likelihood <= 0) + + # Test construction with sigma as a Parameter + sigma = pybop.Parameter("sigma", prior=pybop.Uniform(0.4, 0.6)) + likelihood = pybop.GaussianLogLikelihood(one_signal_problem, sigma0=sigma) + + # Test invalid sigma + with pytest.raises( + TypeError, + match=r"Expected sigma0 to contain Parameter objects or numeric values.", + ): + likelihood = 
pybop.GaussianLogLikelihood( + one_signal_problem, sigma0="Invalid string" + ) + + @pytest.mark.unit + def test_gaussian_log_likelihood_dsigma_scale(self, one_signal_problem): + likelihood = pybop.GaussianLogLikelihood(one_signal_problem, dsigma_scale=0.05) + assert likelihood.dsigma_scale == 0.05 + likelihood.dsigma_scale = 1e3 + assert likelihood.dsigma_scale == 1e3 + + # Test incorrect sigma scale + with pytest.raises(ValueError): + likelihood.dsigma_scale = -1e3 @pytest.mark.unit def test_gaussian_log_likelihood_returns_negative_inf(self, one_signal_problem): @@ -150,7 +175,7 @@ def test_gaussian_log_likelihood_known_sigma_returns_negative_inf( self, one_signal_problem ): likelihood = pybop.GaussianLogLikelihoodKnownSigma( - one_signal_problem, sigma=np.array([0.2]) + one_signal_problem, sigma0=np.array([0.2]) ) assert likelihood(np.array([0.01])) == -np.inf # parameter value too small assert ( diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py index 6809aec8..b50a14bf 100644 --- a/tests/unit/test_models.py +++ b/tests/unit/test_models.py @@ -271,7 +271,7 @@ def test_reinit(self): state = model.reinit(inputs={}) np.testing.assert_array_almost_equal(state.as_ndarray(), np.array([[y0]])) - model.parameters = pybop.Parameters(pybop.Parameter("y0")) + model.classify_and_update_parameters(pybop.Parameters(pybop.Parameter("y0"))) state = model.reinit(inputs=[1]) np.testing.assert_array_almost_equal(state.as_ndarray(), np.array([[y0]])) @@ -311,6 +311,9 @@ def test_basemodel(self): with pytest.raises(NotImplementedError): base.approximate_capacity(x) + base.classify_and_update_parameters(parameters=None) + assert base._n_parameters == 0 + @pytest.mark.unit def test_thevenin_model(self): parameter_set = pybop.ParameterSet( diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 740e42d3..5770b991 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -276,6 +276,16 @@ def test_scipy_minimize_with_jac(self, cost): ): optim = pybop.SciPyMinimize(cost=cost, jac="Invalid string") + @pytest.mark.unit + def test_scipy_minimize_invalid_x0(self, cost): + # Check a starting point that returns an infinite cost + invalid_x0 = np.array([1.1]) + optim = pybop.SciPyMinimize( + cost=cost, x0=invalid_x0, maxiter=10, allow_infeasible_solutions=False + ) + optim.run() + assert abs(optim._cost0) != np.inf + @pytest.mark.unit def test_single_parameter(self, cost): # Test catch for optimisers that can only run with multiple parameters diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 02b3ea5c..ebfccea1 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -78,6 +78,16 @@ def test_invalid_inputs(self, parameter): ): pybop.Parameter("Name", bounds=[0.7, 0.3]) + @pytest.mark.unit + def test_sample_initial_values(self): + parameter = pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.6, 0.02), + bounds=[0.375, 0.7], + ) + sample = parameter.get_initial_value() + assert (sample >= 0.375) and (sample <= 0.7) + class TestParameters: """ diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py index 57f0e4ee..4eb2dc47 100644 --- a/tests/unit/test_plots.py +++ b/tests/unit/test_plots.py @@ -144,3 +144,52 @@ def test_with_ipykernel(self, dataset, cost, optim): pybop.plot_convergence(optim) pybop.plot_parameters(optim) pybop.plot2d(optim, steps=5) + + @pytest.mark.unit + def test_gaussianlogliklihood_plots(self, fitting_problem): + # Test 
plotting of GaussianLogLikelihood + likelihood = pybop.GaussianLogLikelihood(fitting_problem) + optim = pybop.CMAES(likelihood, max_iterations=5) + optim.run() + + # Plot parameters + pybop.plot_parameters(optim) + + @pytest.mark.unit + def test_plot2d_incorrect_number_of_parameters(self, model, dataset): + # Test with fewer than two parameters + parameters = pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.68, 0.05), + bounds=[0.5, 0.8], + ), + ) + fitting_problem = pybop.FittingProblem(model, parameters, dataset) + cost = pybop.SumSquaredError(fitting_problem) + with pytest.raises( + ValueError, match="This cost function takes fewer than 2 parameters." + ): + pybop.plot2d(cost) + + # Test with more than two parameters + parameters = pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.68, 0.05), + bounds=[0.5, 0.8], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Gaussian(0.58, 0.05), + bounds=[0.4, 0.7], + ), + pybop.Parameter( + "Positive particle radius [m]", + prior=pybop.Gaussian(4.8e-06, 0.05e-06), + bounds=[4e-06, 6e-06], + ), + ) + fitting_problem = pybop.FittingProblem(model, parameters, dataset) + cost = pybop.SumSquaredError(fitting_problem) + pybop.plot2d(cost)
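For context, a minimal sketch of how the reworked GaussianLogLikelihood from this diff can be used to estimate the noise level alongside the model parameters. It assumes a `problem` built as in the example scripts above; the `sigma0` guess and `dsigma_scale` value are illustrative only:

    # Sigma is appended to the fitting parameters, so the optimiser returns it
    # as the final entry (or entries, for multiple outputs) of x.
    likelihood = pybop.GaussianLogLikelihood(problem, sigma0=0.008, dsigma_scale=1.0)
    optim = pybop.IRPropMin(
        likelihood,
        max_unchanged_iterations=20,
        min_iterations=20,
        max_iterations=100,
    )
    x, final_cost = optim.run()
    print("Estimated parameters:", x[:-1])
    print("Estimated sigma:", x[-1])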
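Similarly, a sketch of the prior-based default bounds that the `spm_MLE.py` change relies on: when a `pybop.Parameter` has a prior but no explicit bounds, `set_bounds` now derives them via the `boundary_multiplier` argument. The numbers below are illustrative:

    param = pybop.Parameter(
        "Positive electrode active material volume fraction",
        prior=pybop.Gaussian(0.48, 0.05),
        # no bounds supplied
    )
    # With the default boundary_multiplier of 6, the bounds default to
    # [mean - 6 * sigma, mean + 6 * sigma] = [0.18, 0.78], and
    # "Default bounds applied based on prior distribution." is printed.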