diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0cb8b572..d6baab06 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ ci: repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.5.5" + rev: "v0.5.6" hooks: - id: ruff args: [--fix, --show-fixes] diff --git a/CHANGELOG.md b/CHANGELOG.md index 38a87722..5e45108a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,9 @@ ## Features - [#353](https://github.com/pybop-team/PyBOP/issues/353) - Allow user-defined check_params functions to enforce nonlinear constraints and enable SciPy constrained optimisation methods +- [#418](https://github.com/pybop-team/PyBOP/issues/418) - Wraps the `get_parameter_info` method from PyBaMM to get a dictionary of parameter names and types. +- [#413](https://github.com/pybop-team/PyBOP/pull/413) - Adds `DesignCost` functionality to `WeightedCost` class with additional tests. +- [#357](https://github.com/pybop-team/PyBOP/pull/357) - Adds `Transformation()` class with `LogTransformation()`, `IdentityTransformation()`, and `ScaledTransformation()`, `ComposedTransformation()` implementations with corresponding examples and tests. - [#427](https://github.com/pybop-team/PyBOP/issues/427) - Adds the nbstripout pre-commit hook to remove unnecessary metadata from notebooks. - [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script. - [#393](https://github.com/pybop-team/PyBOP/pull/383) - Adds Minkowski and SumofPower cost classes, with an example and corresponding tests. @@ -10,6 +13,8 @@ ## Bug Fixes +- [#421](https://github.com/pybop-team/PyBOP/issues/421) - Adds a default value for the initial SOC for design problems. 
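Context for the #357 changelog entry above: the new transformation classes attach directly to `pybop.Parameter` objects, as the `spm_CMAES.py` hunk later in this diff shows. A minimal sketch of the intended usage, built only from names that appear in this diff (illustrative, not canonical):

```python
import pybop

# The parameter is sampled and optimised in the search (log) space, while the
# prior and bounds are specified in model space; per the parameter.py hunk
# below, set_bounds() converts the bounds to the search space internally.
parameter = pybop.Parameter(
    "Negative particle radius [m]",
    prior=pybop.Gaussian(6e-06, 0.1e-6),
    bounds=[1e-6, 9e-6],
    transformation=pybop.LogTransformation(),
)
```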
+ ## Breaking Changes # [v24.6.1](https://github.com/pybop-team/PyBOP/tree/v24.6.1) - 2024-07-31 @@ -20,7 +25,6 @@ ## Bug Fixes - ## Breaking Changes # [v24.6](https://github.com/pybop-team/PyBOP/tree/v24.6) - 2024-07-08 diff --git a/examples/notebooks/spm_electrode_design.ipynb b/examples/notebooks/spm_electrode_design.ipynb index e7b2fd57..6bc92a12 100644 --- a/examples/notebooks/spm_electrode_design.ipynb +++ b/examples/notebooks/spm_electrode_design.ipynb @@ -267,7 +267,7 @@ { "data": { "image/svg+xml": [ [SVG payload elided: "Optimised Comparison" plot, axes Time / s vs Voltage / V, traces Reference and Optimised] ] }, "metadata": {}, @@ -306,7 +306,7 @@ { "data": { "image/svg+xml": [ [SVG payload elided: "Cost Landscape" plot, axes Positive electrode thickness [m] vs Positive particle radius [m]] ] }, "metadata": {}, diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py index ed38144a..4376fb46 100644 --- a/examples/scripts/spm_CMAES.py +++ b/examples/scripts/spm_CMAES.py @@ -13,12 +13,14 @@ prior=pybop.Gaussian(6e-06, 0.1e-6), bounds=[1e-6, 9e-6], true_value=parameter_set["Negative particle radius [m]"], + transformation=pybop.LogTransformation(), ), pybop.Parameter( "Positive particle radius [m]", prior=pybop.Gaussian(4.5e-06, 0.1e-6), bounds=[1e-6, 9e-6], true_value=parameter_set["Positive particle radius [m]"], + transformation=pybop.LogTransformation(), ), ) @@ -42,7 +44,7 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) cost = pybop.SumSquaredError(problem) -optim = pybop.CMAES(cost, max_iterations=100) +optim = pybop.CMAES(cost, sigma0=0.25, max_unchanged_iterations=20, max_iterations=50) # Run the optimisation x, final_cost = optim.run() diff --git a/examples/scripts/spm_weighted_cost.py b/examples/scripts/spm_weighted_cost.py index 74c43a33..43c5cd00 100644 --- a/examples/scripts/spm_weighted_cost.py +++ b/examples/scripts/spm_weighted_cost.py @@ -24,24 +24,37 @@ # Generate data sigma = 0.001 -t_eval = np.arange(0, 900, 3) -values = model.predict(t_eval=t_eval) -corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval)) +init_soc = 0.5 +experiment = pybop.Experiment( + [ + ( + "Discharge at 0.5C for 3 minutes (3 second period)", + "Charge at 0.5C for 3 minutes (3 second period)", + ), + ] + * 2 +) +values = model.predict(experiment=experiment, init_soc=init_soc) + + +def noise(sigma): + return np.random.normal(0, sigma, len(values["Voltage [V]"].data)) + # Form dataset dataset = pybop.Dataset( { - "Time [s]": t_eval, + "Time [s]": values["Time [s]"].data, "Current function [A]": values["Current [A]"].data, - "Voltage [V]": corrupt_values, + "Voltage [V]": values["Voltage [V]"].data + noise(sigma), } ) # Generate problem, cost function, and optimisation class -problem = pybop.FittingProblem(model, parameters, dataset) +problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) cost1 = pybop.SumSquaredError(problem) cost2 = pybop.RootMeanSquaredError(problem) -weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100]) +weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[0.1, 1]) for cost in [weighted_cost, cost1, cost2]: optim = pybop.IRPropMin(cost, max_iterations=60) diff --git 
a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py index f5b7c827..8542b4ad 100644 --- a/examples/scripts/spme_max_energy.py +++ b/examples/scripts/spme_max_energy.py @@ -9,12 +9,6 @@ # electrode widths, particle radii, volume fractions and # separator width. -# NOTE: This script can be easily adjusted to consider the volumetric -# (instead of gravimetric) energy density by changing the line which -# defines the cost and changing the output to: -# print(f"Initial volumetric energy density: {cost(optim.x0):.2f} Wh.m-3") -# print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3") - # Define parameter set and model parameter_set = pybop.ParameterSet.pybamm("Chen2020", formation_concentrations=True) model = pybop.lithium_ion.SPMe(parameter_set=parameter_set) @@ -45,17 +39,21 @@ model, parameters, experiment, signal=signal, init_soc=init_soc ) -# Generate cost function and optimisation class: -cost = pybop.GravimetricEnergyDensity(problem) +# Generate multiple cost functions and combine them. +cost1 = pybop.GravimetricEnergyDensity(problem, update_capacity=True) +cost2 = pybop.VolumetricEnergyDensity(problem, update_capacity=True) +cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) + +# Run optimisation optim = pybop.PSO( cost, verbose=True, allow_infeasible_solutions=False, max_iterations=15 ) - -# Run optimisation x, final_cost = optim.run() print("Estimated parameters:", x) -print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1") -print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1") +print(f"Initial gravimetric energy density: {cost1(optim.x0):.2f} Wh.kg-1") +print(f"Optimised gravimetric energy density: {cost1(x):.2f} Wh.kg-1") +print(f"Initial volumetric energy density: {cost2(optim.x0):.2f} Wh.m-3") +print(f"Optimised volumetric energy density: {cost2(x):.2f} Wh.m-3") # Plot the timeseries output if cost.update_capacity: diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py index 99917f3f..1103ea89 100644 --- a/examples/standalone/cost.py +++ b/examples/standalone/cost.py @@ -21,7 +21,7 @@ class StandaloneCost(pybop.BaseCost): Methods ------- - __call__(x, grad=None) + __call__(x) Calculate the cost for a given parameter value. """ @@ -43,7 +43,7 @@ def __init__(self, problem=None): ) self.x0 = self.parameters.initial_value() - def _evaluate(self, inputs, grad=None): + def _evaluate(self, inputs): """ Calculate the cost for a given parameter value. @@ -54,9 +54,6 @@ def _evaluate(self, inputs, grad=None): ---------- inputs : Dict The parameters for which to evaluate the cost. - grad : array-like, optional - Unused parameter, present for compatibility with gradient-based - optimizers. Returns ------- diff --git a/examples/standalone/problem.py b/examples/standalone/problem.py index 18bf1f7d..5fd33e0f 100644 --- a/examples/standalone/problem.py +++ b/examples/standalone/problem.py @@ -42,7 +42,7 @@ def __init__( ) self._target = {signal: self._dataset[signal] for signal in self.signal} - def evaluate(self, inputs): + def evaluate(self, inputs, **kwargs): """ Evaluate the model with the given parameters and return the signal. 
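Condensing the `spme_max_energy.py` changes above: the script now optimises a `WeightedCost` built from two design costs rather than a single `GravimetricEnergyDensity`. A sketch of the pattern, assuming `problem` is the `pybop.DesignProblem` constructed earlier in that script:

```python
import pybop

# Combine gravimetric and volumetric energy density into one objective;
# update_capacity=True recomputes the cell capacity for each candidate design.
cost1 = pybop.GravimetricEnergyDensity(problem, update_capacity=True)
cost2 = pybop.VolumetricEnergyDensity(problem, update_capacity=True)
cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1])

optim = pybop.PSO(cost, allow_infeasible_solutions=False, max_iterations=15)
x, final_cost = optim.run()

# The sub-costs remain callable for reporting in their own units.
print(f"Optimised gravimetric energy density: {cost1(x):.2f} Wh.kg-1")
print(f"Optimised volumetric energy density: {cost2(x):.2f} Wh.m-3")
```

Per the `_weighted_cost.py` hunks below, `WeightedCost` detects that both sub-costs share the same problem (`_has_identical_problems`), so the model is simulated once per candidate rather than once per sub-cost.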
diff --git a/pybop/__init__.py b/pybop/__init__.py index 922a6480..ff8c08a8 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -55,6 +55,17 @@ # from ._dataset import Dataset +# +# Transformation classes +# +from .transformation.base_transformation import Transformation +from .transformation.transformations import ( + IdentityTransformation, + ScaledTransformation, + LogTransformation, + ComposedTransformation, +) + # # Parameter classes # diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py index 1f96e2fb..87ed9381 100644 --- a/pybop/costs/_likelihoods.py +++ b/pybop/costs/_likelihoods.py @@ -39,11 +39,11 @@ def __init__(self, problem: BaseProblem, sigma0: Union[list[float], float]): self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi * self.sigma2) self._multip = -1 / (2.0 * self.sigma2) - def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float: + def _evaluate(self, inputs: Inputs) -> float: """ Evaluates the Gaussian log-likelihood for the given parameters with known sigma. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return -np.inf e = np.asarray( @@ -51,9 +51,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._offset + self._multip - * np.sum( - (self._target[signal] - self._current_prediction[signal]) ** 2.0 - ) + * np.sum((self._target[signal] - self.y[signal]) ** 2.0) ) for signal in self.signal ] @@ -65,20 +63,15 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: """ Calculates the log-likelihood and gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return -np.inf, -self._de * np.ones(self.n_parameters) likelihood = self._evaluate(inputs) r = np.asarray( - [ - self._target[signal] - self._current_prediction[signal] - for signal in self.signal - ] - ) - dl = np.sum( - (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1 + [self._target[signal] - self.y[signal] for signal in self.signal] ) + dl = np.sum((np.sum((r * self.dy.T), axis=2) / self.sigma2), axis=1) return likelihood, dl @@ -123,7 +116,7 @@ def __init__( super().__init__(problem) self._dsigma_scale = dsigma_scale self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi) - self._fixed_problem = False # keep problem evaluation within _evaluate + self._has_separable_problem = False self.sigma = Parameters() self._add_sigma_parameters(sigma0) @@ -175,7 +168,7 @@ def dsigma_scale(self, new_value): raise ValueError("dsigma_scale must be non-negative") self._dsigma_scale = new_value - def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float: + def _evaluate(self, inputs: Inputs) -> float: """ Evaluates the Gaussian log-likelihood for the given parameters. 
@@ -196,10 +189,8 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo if np.any(sigma <= 0): return -np.inf - self._current_prediction = self.problem.evaluate( - self.problem.parameters.as_dict() - ) - if not self.verify_prediction(self._current_prediction): + self.y = self.problem.evaluate(self.problem.parameters.as_dict()) + if not self.verify_prediction(self.y): return -np.inf e = np.asarray( @@ -207,9 +198,7 @@ def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> flo np.sum( self._logpi - self.n_time_data * np.log(sigma) - - np.sum( - (self._target[signal] - self._current_prediction[signal]) ** 2.0 - ) + - np.sum((self._target[signal] - self.y[signal]) ** 2.0) / (2.0 * sigma**2.0) ) for signal in self.signal @@ -238,23 +227,16 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if np.any(sigma <= 0): return -np.inf, -self._de * np.ones(self.n_parameters) - self._current_prediction, self._current_sensitivities = self.problem.evaluateS1( - self.problem.parameters.as_dict() - ) - if not self.verify_prediction(self._current_prediction): + self.y, self.dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) + if not self.verify_prediction(self.y): return -np.inf, -self._de * np.ones(self.n_parameters) likelihood = self._evaluate(inputs) r = np.asarray( - [ - self._target[signal] - self._current_prediction[signal] - for signal in self.signal - ] - ) - dl = np.sum( - (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1 + [self._target[signal] - self.y[signal] for signal in self.signal] ) + dl = np.sum((np.sum((r * self.dy.T), axis=2) / (sigma**2.0)), axis=1) dsigma = ( -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0) ) / self._dsigma_scale @@ -296,7 +278,10 @@ def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-3): ): raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood") - def _evaluate(self, inputs: Inputs, grad=None) -> float: + self.parameters = self.likelihood.parameters + self._has_separable_problem = self.likelihood._has_separable_problem + + def _evaluate(self, inputs: Inputs) -> float: """ Calculate the maximum a posteriori cost for a given set of parameters. @@ -304,9 +289,6 @@ def _evaluate(self, inputs: Inputs, grad=None) -> float: ---------- inputs : Inputs The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. 
Returns ------- @@ -320,9 +302,9 @@ def _evaluate(self, inputs: Inputs) -> float: if not np.isfinite(log_prior).any(): return -np.inf - if self._fixed_problem: - self.likelihood._current_prediction = self._current_prediction - log_likelihood = self.likelihood._evaluate(inputs) + if self._has_separable_problem: + self.likelihood.y = self.y + log_likelihood = self.likelihood.evaluate(inputs) posterior = log_likelihood + log_prior return posterior @@ -354,12 +336,9 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]: if not np.isfinite(log_prior).any(): return -np.inf, -self._de * np.ones(self.n_parameters) - if self._fixed_problem: - ( - self.likelihood._current_prediction, - self.likelihood._current_sensitivities, - ) = self._current_prediction, self._current_sensitivities - log_likelihood, dl = self.likelihood._evaluateS1(inputs) + if self._has_separable_problem: + self.likelihood.y, self.likelihood.dy = (self.y, self.dy) + log_likelihood, dl = self.likelihood.evaluateS1(inputs) # Compute a finite difference approximation of the gradient of the log prior delta = self.parameters.initial_value() * self.gradient_step diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py index effa5a51..ac4d9002 100644 --- a/pybop/costs/_weighted_cost.py +++ b/pybop/costs/_weighted_cost.py @@ -1,8 +1,9 @@ +import warnings from typing import Optional import numpy as np -from pybop import BaseCost +from pybop import BaseCost, BaseLikelihood, DesignCost from pybop.parameters.parameter import Inputs @@ -13,54 +14,73 @@ class WeightedCost(BaseCost): Inherits all parameters and attributes from ``BaseCost``. - Additional Attributes + Attributes --------------------- costs : list[pybop.BaseCost] A list of PyBOP cost objects. weights : list[float] A list of values with which to weight the cost values. - _different_problems : bool - If True, the problem for each cost is evaluated independently during - each evaluation of the cost (default: False). + _has_identical_problems : bool + If True, the shared problem will be evaluated once and saved before the + self._evaluate() method of each cost is called (default: False). + _has_separable_problem : bool + If True, the shared problem is separable from the cost function and + will be evaluated for each problem before the cost evaluation is + called (default: False). This attribute is used by the sub-cost objects; + on the top-level WeightedCost it is always False.
""" - def __init__(self, *args, weights: Optional[list[float]] = None): - self.costs = [] - for cost in args: - if not isinstance(cost, BaseCost): - raise TypeError(f"Received {type(cost)} instead of cost object.") - self.costs.append(cost) - self.weights = weights - self._different_problems = False - - if self.weights is None: + def __init__(self, *costs, weights: Optional[list[float]] = None): + if not all(isinstance(cost, BaseCost) for cost in costs): + raise TypeError("All costs must be instances of BaseCost.") + self.costs = [cost for cost in costs] + if len(set(type(cost.problem) for cost in self.costs)) > 1: + raise TypeError("All problems must be of the same class type.") + self.minimising = not any( + isinstance(cost, (BaseLikelihood, DesignCost)) for cost in self.costs + ) + + # Check if weights are provided + if weights is not None: + try: + self.weights = np.asarray(weights, dtype=float) + except ValueError: + raise ValueError("Weights must be numeric values.") from None + + if self.weights.size != len(self.costs): + raise ValueError("Number of weights must match number of costs.") + else: self.weights = np.ones(len(self.costs)) - elif isinstance(self.weights, list): - self.weights = np.array(self.weights) - if not isinstance(self.weights, np.ndarray): - raise TypeError( - "Expected a list or array of weights the same length as costs." - ) - if not len(self.weights) == len(self.costs): - raise ValueError( - "Expected a list or array of weights the same length as costs." - ) # Check if all costs depend on the same problem - for cost in self.costs: - if hasattr(cost, "problem") and cost.problem is not self.costs[0].problem: - self._different_problems = True + self._has_identical_problems = all( + cost._has_separable_problem and cost.problem is self.costs[0].problem + for cost in self.costs + ) - if not self._different_problems: + if self._has_identical_problems: super().__init__(self.costs[0].problem) - self._fixed_problem = self.costs[0]._fixed_problem else: super().__init__() - self._fixed_problem = False for cost in self.costs: self.parameters.join(cost.parameters) - def _evaluate(self, inputs: Inputs, grad=None): + # Check if any cost function requires capacity update + self.update_capacity = False + if any(cost.update_capacity for cost in self.costs): + self.update_capacity = True + + warnings.warn( + "WeightedCost doesn't currently support DesignCosts with different `update_capacity` attributes,\n" + f"Using global `DesignCost.update_capacity` attribute as: {self.update_capacity}", + UserWarning, + stacklevel=2, + ) + + # Weighted costs do not use this functionality + self._has_separable_problem = False + + def _evaluate(self, inputs: Inputs): """ Calculate the weighted cost for a given set of parameters. @@ -68,29 +88,28 @@ def _evaluate(self, inputs: Inputs, grad=None): ---------- inputs : Inputs The parameters for which to compute the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. Returns ------- float The weighted cost value. 
""" - e = np.empty_like(self.costs) + self.parameters.update(values=list(inputs.values())) - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) + if self._has_identical_problems: + self.y = self.problem.evaluate(inputs, update_capacity=self.update_capacity) + + e = np.empty_like(self.costs) for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction = cost.problem.evaluate(inputs) - else: - cost._current_prediction = self._current_prediction - e[i] = cost._evaluate(inputs, grad) + inputs = cost.parameters.as_dict() + if self._has_identical_problems: + cost.y = self.y + elif cost._has_separable_problem: + cost.y = cost.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) + e[i] = cost._evaluate(inputs) return np.dot(e, self.weights) @@ -109,30 +128,27 @@ def _evaluateS1(self, inputs: Inputs): A tuple containing the cost and the gradient. The cost is a float, and the gradient is an array-like of the same length as `x`. """ + self.parameters.update(values=list(inputs.values())) + + if self._has_identical_problems: + self.y, self.dy = self.problem.evaluateS1(inputs) + e = np.empty_like(self.costs) de = np.empty((len(self.parameters), len(self.costs))) - if not self._fixed_problem and self._different_problems: - self.parameters.update(values=list(inputs.values())) - elif not self._fixed_problem: - self._current_prediction, self._current_sensitivities = ( - self.problem.evaluateS1(inputs) - ) - for i, cost in enumerate(self.costs): - if not self._fixed_problem and self._different_problems: - inputs = cost.parameters.as_dict() - cost._current_prediction, cost._current_sensitivities = ( - cost.problem.evaluateS1(inputs) - ) - else: - cost._current_prediction, cost._current_sensitivities = ( - self._current_prediction, - self._current_sensitivities, - ) + inputs = cost.parameters.as_dict() + if self._has_identical_problems: + cost.y, cost.dy = (self.y, self.dy) + elif cost._has_separable_problem: + cost.y, cost.dy = cost.problem.evaluateS1(inputs) e[i], de[:, i] = cost._evaluateS1(inputs) e = np.dot(e, self.weights) de = np.dot(de, self.weights) return e, de + + @property + def has_identical_problems(self): + return self._has_identical_problems diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index eedbbc2c..168a4dee 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -22,37 +22,44 @@ class BaseCost: An array containing the target data to fit. n_outputs : int The number of outputs in the model. - - Additional Attributes - --------------------- - _fixed_problem : bool - If True, the problem does not need to be rebuilt before the cost is - calculated (default: False). + _has_separable_problem : bool + If True, the problem is separable from the cost function and will be + evaluated in advance of the call to self._evaluate() (default: False). 
""" def __init__(self, problem: Optional[BaseProblem] = None): self.parameters = Parameters() + self.transformation = None self.problem = problem - self._fixed_problem = False + self.verbose = False + self._has_separable_problem = False + self.update_capacity = False + self.y = None + self.dy = None self.set_fail_gradient() if isinstance(self.problem, BaseProblem): self._target = self.problem._target self.parameters.join(self.problem.parameters) self.n_outputs = self.problem.n_outputs self.signal = self.problem.signal - self._fixed_problem = True + self.transformation = self.parameters.construct_transformation() + self._has_separable_problem = True @property def n_parameters(self): return len(self.parameters) - def __call__(self, inputs: Union[Inputs, list], grad=None): + @property + def has_separable_problem(self): + return self._has_separable_problem + + def __call__(self, inputs: Union[Inputs, list]): """ Call the evaluate function for a given set of parameters. """ - return self.evaluate(inputs, grad) + return self.evaluate(inputs) - def evaluate(self, inputs: Union[Inputs, list], grad=None): + def evaluate(self, inputs: Union[Inputs, list]): """ Call the evaluate function for a given set of parameters. @@ -60,9 +67,6 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): ---------- inputs : Inputs or array-like The parameters for which to compute the cost and gradient. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. Returns ------- @@ -74,13 +78,17 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): ValueError If an error occurs during the calculation of the cost. """ - inputs = self.parameters.verify(inputs) + if self.transformation: + p = self.transformation.to_model(inputs) + inputs = self.parameters.verify(p if self.transformation else inputs) try: - if self._fixed_problem: - self._current_prediction = self.problem.evaluate(inputs) + if self._has_separable_problem: + self.y = self.problem.evaluate( + inputs, update_capacity=self.update_capacity + ) - return self._evaluate(inputs, grad) + return self._evaluate(inputs) except NotImplementedError as e: raise e @@ -88,7 +96,7 @@ def evaluate(self, inputs: Union[Inputs, list], grad=None): except Exception as e: raise ValueError(f"Error in cost calculation: {e}") from e - def _evaluate(self, inputs: Inputs, grad=None): + def _evaluate(self, inputs: Inputs): """ Calculate the cost function value for a given set of parameters. @@ -98,9 +106,6 @@ def _evaluate(self, inputs: Inputs, grad=None): ---------- inputs : Inputs The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. Returns ------- @@ -134,13 +139,13 @@ def evaluateS1(self, inputs: Union[Inputs, list]): ValueError If an error occurs during the calculation of the cost or gradient. 
""" - inputs = self.parameters.verify(inputs) + if self.transformation: + p = self.transformation.to_model(inputs) + inputs = self.parameters.verify(p if self.transformation else inputs) try: - if self._fixed_problem: - self._current_prediction, self._current_sensitivities = ( - self.problem.evaluateS1(inputs) - ) + if self._has_separable_problem: + self.y, self.dy = self.problem.evaluateS1(inputs) return self._evaluateS1(inputs) diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py index 738dfe61..ce00ad9e 100644 --- a/pybop/costs/design_costs.py +++ b/pybop/costs/design_costs.py @@ -65,26 +65,6 @@ def update_simulation_data(self, inputs: Inputs): self.problem._target = {key: solution[key] for key in self.problem.signal} self.dt = solution["Time [s]"][1] - solution["Time [s]"][0] - def _evaluate(self, inputs: Inputs, grad=None): - """ - Computes the value of the cost function. - - This method must be implemented by subclasses. - - Parameters - ---------- - inputs : Inputs - The parameters for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. - - Raises - ------ - NotImplementedError - If the method has not been implemented by the subclass. - """ - raise NotImplementedError - class GravimetricEnergyDensity(DesignCost): """ @@ -100,7 +80,7 @@ def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) self._fixed_problem = False # keep problem evaluation within _evaluate - def _evaluate(self, inputs: Inputs, grad=None): + def _evaluate(self, inputs: Inputs): """ Computes the cost function for the energy density. @@ -108,39 +88,21 @@ def _evaluate(self, inputs: Inputs, grad=None): ---------- inputs : Inputs The parameters for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. Returns ------- float The gravimetric energy density or -infinity in case of infeasible parameters. """ - try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - solution = self.problem.evaluate(inputs) - - voltage, current = solution["Voltage [V]"], solution["Current [A]"] - energy_density = np.trapz(voltage * current, dx=self.dt) / ( - 3600 * self.problem.model.cell_mass(self.parameter_set) - ) - - return energy_density - - # Catch infeasible solutions and return infinity - except UserWarning as e: - print(f"Ignoring this sample due to: {e}") + if not any(np.isfinite(self.y[signal][0]) for signal in self.signal): return -np.inf - # Catch any other exception and return infinity - except Exception as e: - print(f"An error occurred during the evaluation: {e}") - return -np.inf + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] + energy_density = np.trapz(voltage * current, dx=self.dt) / ( + 3600 * self.problem.model.cell_mass(self.parameter_set) + ) + + return energy_density class VolumetricEnergyDensity(DesignCost): @@ -155,9 +117,8 @@ class VolumetricEnergyDensity(DesignCost): def __init__(self, problem, update_capacity=False): super().__init__(problem, update_capacity) - self._fixed_problem = False # keep problem evaluation within _evaluate - def _evaluate(self, inputs: Inputs, grad=None): + def _evaluate(self, inputs: Inputs): """ Computes the cost function for the energy density. 
@@ -165,36 +126,18 @@ def _evaluate(self, inputs: Inputs, grad=None): ---------- inputs : Inputs The parameters for which to compute the cost. - grad : array, optional - Gradient information, not used in this method. Returns ------- float The volumetric energy density or -infinity in case of infeasible parameters. """ - try: - with warnings.catch_warnings(): - # Convert UserWarning to an exception - warnings.filterwarnings("error", category=UserWarning) - - if self.update_capacity: - self.problem.model.approximate_capacity(inputs) - solution = self.problem.evaluate(inputs) - - voltage, current = solution["Voltage [V]"], solution["Current [A]"] - energy_density = np.trapz(voltage * current, dx=self.dt) / ( - 3600 * self.problem.model.cell_volume(self.parameter_set) - ) - - return energy_density - - # Catch infeasible solutions and return infinity - except UserWarning as e: - print(f"Ignoring this sample due to: {e}") + if not any(np.isfinite(self.y[signal][0]) for signal in self.signal): return -np.inf - # Catch any other exception and return infinity - except Exception as e: - print(f"An error occurred during the evaluation: {e}") - return -np.inf + voltage, current = self.y["Voltage [V]"], self.y["Current [A]"] + energy_density = np.trapz(voltage * current, dx=self.dt) / ( + 3600 * self.problem.model.cell_volume(self.parameter_set) + ) + + return energy_density diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 18cc752d..f3690189 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -20,7 +20,7 @@ class RootMeanSquaredError(BaseCost): def __init__(self, problem): super().__init__(problem) - def _evaluate(self, inputs: Inputs, grad=None): + def _evaluate(self, inputs: Inputs): """ Calculate the root mean square error for a given set of parameters. @@ -38,16 +38,12 @@ def _evaluate(self, inputs: Inputs, grad=None): The root mean square error. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sqrt( - np.mean( - (self._current_prediction[signal] - self._target[signal]) ** 2 - ) - ) + np.sqrt(np.mean((self.y[signal] - self._target[signal]) ** 2)) for signal in self.signal ] ) @@ -74,19 +70,14 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sqrt(np.mean(r**2, axis=1)) - de = np.mean((r * self._current_sensitivities.T), axis=2) / ( - e + np.finfo(float).eps - ) + de = np.mean((r * self.dy.T), axis=2) / (e + np.finfo(float).eps) if self.n_outputs == 1: return e.item(), de.flatten() @@ -115,7 +106,7 @@ class SumSquaredError(BaseCost): def __init__(self, problem): super().__init__(problem) - def _evaluate(self, inputs: Inputs, grad=None): + def _evaluate(self, inputs: Inputs): """ Calculate the sum of squared errors for a given set of parameters. @@ -123,21 +114,18 @@ def _evaluate(self, inputs: Inputs, grad=None): ---------- inputs : Inputs The parameters for which to evaluate the cost. - grad : array-like, optional - An array to store the gradient of the cost function with respect - to the parameters. 
Returns ------- float The Sum of Squared Error. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum((self._current_prediction[signal] - self._target[signal]) ** 2) + np.sum((self.y[signal] - self._target[signal]) ** 2) for signal in self.signal ] ) @@ -164,17 +152,14 @@ def _evaluateS1(self, inputs: Inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sum(np.sum(r**2, axis=0), axis=0) - de = 2 * np.sum(np.sum((r * self._current_sensitivities.T), axis=2), axis=1) + de = 2 * np.sum(np.sum((r * self.dy.T), axis=2), axis=1) return e, de @@ -234,15 +219,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Minkowski cost. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) ** (1 / self.p) for signal in self.signal ] @@ -270,27 +252,21 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) ** (1 / self.p) for signal in self.signal ] ) de = np.sum( - np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2) + np.sum(r ** (self.p - 1) * self.dy.T, axis=2) / (e ** (self.p - 1) + np.finfo(float).eps), axis=1, ) @@ -350,15 +326,12 @@ def _evaluate(self, inputs: Inputs, grad=None): float The Sum of Power cost. """ - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf e = np.asarray( [ - np.sum( - np.abs(self._current_prediction[signal] - self._target[signal]) - ** self.p - ) + np.sum(np.abs(self.y[signal] - self._target[signal]) ** self.p) for signal in self.signal ] ) @@ -385,19 +358,14 @@ def _evaluateS1(self, inputs): ValueError If an error occurs during the calculation of the cost or gradient. 
""" - if not self.verify_prediction(self._current_prediction): + if not self.verify_prediction(self.y): return np.inf, self._de * np.ones(self.n_parameters) r = np.asarray( - [ - self._current_prediction[signal] - self._target[signal] - for signal in self.signal - ] + [self.y[signal] - self._target[signal] for signal in self.signal] ) e = np.sum(np.sum(np.abs(r) ** self.p)) - de = self.p * np.sum( - np.sum(r ** (self.p - 1) * self._current_sensitivities.T, axis=2), axis=1 - ) + de = self.p * np.sum(np.sum(r ** (self.p - 1) * self.dy.T, axis=2), axis=1) return e, de @@ -416,9 +384,9 @@ class ObserverCost(BaseCost): def __init__(self, observer: Observer): super().__init__(problem=observer) self._observer = observer - self._fixed_problem = False # keep problem evaluation within _evaluate + self._has_separable_problem = False - def _evaluate(self, inputs: Inputs, grad=None): + def _evaluate(self, inputs: Inputs): """ Calculate the observer cost for a given set of parameters. diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py index 784144f2..b7124e22 100644 --- a/pybop/models/base_model.py +++ b/pybop/models/base_model.py @@ -78,6 +78,7 @@ def __init__( self._parameter_set = pybamm.ParameterValues(parameter_set.params) self.param_checker = check_params + self.pybamm_model = None self.parameters = Parameters() self.dataset = None self.signal = None @@ -229,7 +230,7 @@ def rebuild( parameters : pybop.Parameters or Dict, optional A pybop Parameters class or dictionary containing parameter values to apply to the model. parameter_set : pybop.parameter_set, optional - A PyBOP parameter set object or a dictionary containing the parameter values + A PyBOP parameter set object or a dictionary containing the parameter values. check_model : bool, optional If True, the model will be checked for correctness after construction. init_soc : float, optional @@ -475,18 +476,18 @@ def simulateS1(self, inputs: Inputs, t_eval: np.array): def predict( self, - inputs: Inputs = None, - t_eval: np.array = None, - parameter_set: ParameterSet = None, - experiment: Experiment = None, + inputs: Optional[Inputs] = None, + t_eval: Optional[np.array] = None, + parameter_set: Optional[ParameterSet] = None, + experiment: Optional[Experiment] = None, init_soc: Optional[float] = None, ) -> dict[str, np.ndarray[np.float64]]: """ Solve the model using PyBaMM's simulation framework and return the solution. This method sets up a PyBaMM simulation by configuring the model, parameters, experiment - (if any), and initial state of charge (if provided). It then solves the simulation and - returns the resulting solution object. + or time vector, and initial state of charge (if provided). Either 't_eval' or 'experiment' + must be provided. It then solves the simulation and returns the resulting solution object. Parameters ---------- @@ -518,35 +519,43 @@ def predict( if PyBaMM models are not supported by the current simulation method. """ - inputs = self.parameters.verify(inputs) - - if not self.pybamm_model._built: - self.pybamm_model.build_model() + if self._unprocessed_model is None: + raise ValueError( + "The predict method currently only supports PyBaMM models." 
+ ) + elif not self._unprocessed_model._built: + self._unprocessed_model.build_model() parameter_set = parameter_set or self._unprocessed_parameter_set if inputs is not None: + inputs = self.parameters.verify(inputs) parameter_set.update(inputs) + if init_soc is not None and isinstance( + self.pybamm_model, pybamm.equivalent_circuit.Thevenin + ): + parameter_set["Initial SoC"] = init_soc + init_soc = None + if self.check_params( - inputs=inputs, parameter_set=parameter_set, allow_infeasible_solutions=self.allow_infeasible_solutions, ): - if self._unprocessed_model is not None: - if experiment is None: - return pybamm.Simulation( - self._unprocessed_model, - parameter_values=parameter_set, - ).solve(t_eval=t_eval, initial_soc=init_soc) - else: - return pybamm.Simulation( - self._unprocessed_model, - experiment=experiment, - parameter_values=parameter_set, - ).solve(initial_soc=init_soc) + if experiment is not None: + return pybamm.Simulation( + model=self._unprocessed_model, + experiment=experiment, + parameter_values=parameter_set, + ).solve(initial_soc=init_soc) + elif t_eval is not None: + return pybamm.Simulation( + model=self._unprocessed_model, + parameter_values=parameter_set, + ).solve(t_eval=t_eval, initial_soc=init_soc) else: raise ValueError( - "This sim method currently only supports PyBaMM models" + "The predict method requires either an experiment or " + "t_eval to be specified." ) else: @@ -554,8 +563,8 @@ def predict( def check_params( self, - inputs: Inputs = None, - parameter_set: ParameterSet = None, + inputs: Optional[Inputs] = None, + parameter_set: Optional[ParameterSet] = None, allow_infeasible_solutions: bool = True, ): """ @@ -565,6 +574,8 @@ def check_params( ---------- inputs : Inputs The input parameters for the simulation. + parameter_set : pybop.parameter_set, optional + A PyBOP parameter set object or a dictionary containing the parameter values. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). @@ -574,14 +585,20 @@ def check_params( A boolean which signifies whether the parameters are compatible. """ - inputs = self.parameters.verify(inputs) + inputs = self.parameters.verify(inputs) or {} + parameter_set = parameter_set or self._parameter_set return self._check_params( - inputs=inputs, allow_infeasible_solutions=allow_infeasible_solutions + inputs=inputs, + parameter_set=parameter_set, + allow_infeasible_solutions=allow_infeasible_solutions, ) def _check_params( - self, inputs: Inputs = None, allow_infeasible_solutions: bool = True + self, + inputs: Inputs, + parameter_set: ParameterSet, + allow_infeasible_solutions: bool = True, ): """ A compatibility check for the model parameters which can be implemented by subclasses @@ -591,6 +608,8 @@ def _check_params( ---------- inputs : Inputs The input parameters for the simulation. + parameter_set : pybop.parameter_set + A PyBOP parameter set object or a dictionary containing the parameter values. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). @@ -734,3 +753,23 @@ def solver(self): @solver.setter def solver(self, solver): self._solver = solver.copy() if solver is not None else None + + def get_parameter_info(self, print_info: bool = False): + """ + Extracts the parameter names and types and returns them as a dictionary. 
+ """ + if not self.pybamm_model._built: + self.pybamm_model.build_model() + + info = self.pybamm_model.get_parameter_info() + + reduced_info = dict() + for param, param_type in info.values(): + param_name = getattr(param, "name", str(param)) + reduced_info[param_name] = param_type + + if print_info: + for param, param_type in info.values(): + print(param, " : ", param_type) + + return reduced_info diff --git a/pybop/models/empirical/base_ecm.py b/pybop/models/empirical/base_ecm.py index eb3c8a82..33718819 100644 --- a/pybop/models/empirical/base_ecm.py +++ b/pybop/models/empirical/base_ecm.py @@ -1,4 +1,5 @@ from pybop.models.base_model import BaseModel, Inputs +from pybop.parameters.parameter_set import ParameterSet class ECircuitModel(BaseModel): @@ -47,15 +48,14 @@ def __init__( model_options = dict(build=False) for key, value in model_kwargs.items(): model_options[key] = value - self.pybamm_model = pybamm_model(**model_options) - self._unprocessed_model = self.pybamm_model + pybamm_model = pybamm_model(**model_options) # Correct OCP if set to default if ( parameter_set is not None and "Open-circuit voltage [V]" in parameter_set.keys() ): - default_ocp = self.pybamm_model.default_parameter_values[ + default_ocp = pybamm_model.default_parameter_values[ "Open-circuit voltage [V]" ] if parameter_set["Open-circuit voltage [V]"] == "default": @@ -65,6 +65,8 @@ def __init__( super().__init__( name=name, parameter_set=parameter_set, check_params=check_params ) + self.pybamm_model = pybamm_model + self._unprocessed_model = self.pybamm_model # Set parameters, using either the provided ones or the default self.default_parameter_values = self.pybamm_model.default_parameter_values @@ -88,7 +90,12 @@ def __init__( self._disc = None self.geometric_parameters = {} - def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True): + def _check_params( + self, + inputs: Inputs, + parameter_set: ParameterSet, + allow_infeasible_solutions: bool = True, + ): """ Check the compatibility of the model parameters. @@ -96,6 +103,8 @@ def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True): ---------- inputs : Inputs The input parameters for the simulation. + parameter_set : pybop.parameter_set + A PyBOP parameter set object or a dictionary containing the parameter values. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). @@ -103,7 +112,6 @@ def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True): ------- bool A boolean which signifies whether the parameters are compatible. - """ if self.param_checker: return self.param_checker(inputs, allow_infeasible_solutions) diff --git a/pybop/models/empirical/ecm.py b/pybop/models/empirical/ecm.py index 4cf27abf..f831aee8 100644 --- a/pybop/models/empirical/ecm.py +++ b/pybop/models/empirical/ecm.py @@ -1,7 +1,6 @@ from pybamm import equivalent_circuit as pybamm_equivalent_circuit from pybop.models.empirical.base_ecm import ECircuitModel -from pybop.parameters.parameter import Inputs class Thevenin(ECircuitModel): @@ -44,24 +43,3 @@ def __init__( super().__init__( pybamm_model=pybamm_equivalent_circuit.Thevenin, name=name, **model_kwargs ) - - def _check_params(self, inputs: Inputs = None, allow_infeasible_solutions=True): - """ - Check the compatibility of the model parameters. - - Parameters - ---------- - inputs : Inputs - The input parameters for the simulation. 
- allow_infeasible_solutions : bool, optional - If True, infeasible parameter values will be allowed in the optimisation (default: True). - - Returns - ------- - bool - A boolean which signifies whether the parameters are compatible. - - """ - if self.param_checker: - return self.param_checker(inputs, allow_infeasible_solutions) - return True diff --git a/pybop/models/lithium_ion/base_echem.py b/pybop/models/lithium_ion/base_echem.py index f60f500d..71f803c0 100644 --- a/pybop/models/lithium_ion/base_echem.py +++ b/pybop/models/lithium_ion/base_echem.py @@ -3,6 +3,7 @@ from pybamm import lithium_ion as pybamm_lithium_ion from pybop.models.base_model import BaseModel, Inputs +from pybop.parameters.parameter_set import ParameterSet class EChemBaseModel(BaseModel): @@ -85,7 +86,10 @@ def __init__( self.geometric_parameters = self.set_geometric_parameters() def _check_params( - self, inputs: Inputs = None, parameter_set=None, allow_infeasible_solutions=True + self, + inputs: Inputs, + parameter_set: ParameterSet, + allow_infeasible_solutions: bool = True, ): """ Check compatibility of the model parameters. @@ -94,6 +98,8 @@ def _check_params( ---------- inputs : Inputs The input parameters for the simulation. + parameter_set : pybop.parameter_set + A PyBOP parameter set object or a dictionary containing the parameter values. allow_infeasible_solutions : bool, optional If True, infeasible parameter values will be allowed in the optimisation (default: True). @@ -102,8 +108,6 @@ def _check_params( bool A boolean which signifies whether the parameters are compatible. """ - parameter_set = parameter_set or self._parameter_set - if self.pybamm_model.options["working electrode"] == "positive": electrode_params = [ ( diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 2b00b234..c6d68e81 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -3,7 +3,14 @@ import numpy as np -from pybop import BaseCost, BaseLikelihood, DesignCost, Parameter, Parameters +from pybop import ( + BaseCost, + BaseLikelihood, + DesignCost, + Parameter, + Parameters, + WeightedCost, +) class BaseOptimiser: @@ -58,6 +65,7 @@ def __init__( self.verbose = False self.log = dict(x=[], x_best=[], cost=[]) self.minimising = True + self.transformation = None self.physical_viability = False self.allow_infeasible_solutions = False self.default_max_iterations = 1000 @@ -65,8 +73,11 @@ def __init__( if isinstance(cost, BaseCost): self.cost = cost + self.transformation = self.cost.transformation self.parameters.join(cost.parameters) self.set_allow_infeasible_solutions() + if isinstance(cost, WeightedCost): + self.minimising = cost.minimising if isinstance(cost, (BaseLikelihood, DesignCost)): self.minimising = False @@ -131,6 +142,9 @@ def set_base_options(self): # Set other options self.verbose = self.unset_options.pop("verbose", self.verbose) self.minimising = self.unset_options.pop("minimising", self.minimising) + self.transformation = self.unset_options.pop( + "transformation", self.transformation + ) if "allow_infeasible_solutions" in self.unset_options.keys(): self.set_allow_infeasible_solutions( self.unset_options.pop("allow_infeasible_solutions") @@ -185,20 +199,6 @@ def _run(self): """ raise NotImplementedError - def store_optimised_parameters(self, x): - """ - Update the problem parameters with optimised values. - - The optimised parameter values are stored within the associated PyBOP parameter class. 
- - Parameters - ---------- - x : array-like - Optimised parameter values. - """ - for i, param in enumerate(self.cost.parameters): - param.update(value=x[i]) - def check_optimal_parameters(self, x): """ Check if the optimised parameters are physically viable. diff --git a/pybop/optimisers/base_pints_optimiser.py b/pybop/optimisers/base_pints_optimiser.py index f5698c8e..8bf856c4 100644 --- a/pybop/optimisers/base_pints_optimiser.py +++ b/pybop/optimisers/base_pints_optimiser.py @@ -48,9 +48,6 @@ def __init__(self, cost, pints_optimiser, **optimiser_kwargs): self._evaluations = None self._iterations = None - # PyBOP doesn't currently support the PINTS transformation class - self._transformation = None - self.pints_optimiser = pints_optimiser super().__init__(cost, **optimiser_kwargs) @@ -200,8 +197,8 @@ def f(x): return (L, dl) if self.minimising else (-L, -dl) else: - def f(x, grad=None): - return self.cost(x, grad) if self.minimising else -self.cost(x, grad) + def f(x): + return self.cost(x) if self.minimising else -self.cost(x) # Create evaluator object if self._parallel: @@ -325,8 +322,8 @@ def f(x, grad=None): # Show current parameters x_user = self.pints_optimiser.x_guessed() - if self._transformation is not None: - x_user = self._transformation.to_model(x_user) + if self.transformation is not None: + x_user = self.transformation.to_model(x_user) for p in x_user: print(PintsStrFloat(p)) print("-" * 40) @@ -348,8 +345,8 @@ def f(x, grad=None): f = self.pints_optimiser.f_best() # Inverse transform search parameters - if self._transformation is not None: - x = self._transformation.to_model(x) + if self.transformation is not None: + x = self.transformation.to_model(x) return Result( x=x, final_cost=f if self.minimising else -f, n_iterations=self._iterations diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index 2614027c..07eca7db 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -4,6 +4,7 @@ import numpy as np +from pybop import ComposedTransformation, IdentityTransformation from pybop._utils import is_numeric Inputs = dict[str, float] @@ -37,7 +38,13 @@ class Parameter: """ def __init__( - self, name, initial_value=None, true_value=None, prior=None, bounds=None + self, + name, + initial_value=None, + true_value=None, + prior=None, + bounds=None, + transformation=None, ): """ Construct the parameter class with a name, initial value, prior, and bounds. 
@@ -47,6 +54,7 @@ def __init__( self.true_value = true_value self.initial_value = initial_value self.value = initial_value + self.transformation = transformation self.applied_prior_bounds = False self.set_bounds(bounds) self.margin = 1e-4 @@ -70,6 +78,9 @@ def rvs(self, n_samples: int = 1, random_state=None): """ samples = self.prior.rvs(n_samples, random_state=random_state) + if self.transformation is not None: + samples = self.transformation.to_search(samples) + # Constrain samples to be within bounds if self.bounds is not None: offset = self.margin * (self.upper_bound - self.lower_bound) @@ -151,25 +162,42 @@ def set_bounds(self, bounds=None, boundary_multiplier=6): if bounds is not None: if bounds[0] >= bounds[1]: raise ValueError("Lower bound must be less than upper bound") + elif self.transformation is not None: + self.lower_bound = np.ndarray.item( + self.transformation.to_search(bounds[0]) + ) + self.upper_bound = np.ndarray.item( + self.transformation.to_search(bounds[1]) + ) else: self.lower_bound = bounds[0] self.upper_bound = bounds[1] + elif self.prior is not None: self.applied_prior_bounds = True self.lower_bound = self.prior.mean - boundary_multiplier * self.prior.sigma self.upper_bound = self.prior.mean + boundary_multiplier * self.prior.sigma - bounds = [self.lower_bound, self.upper_bound] print("Default bounds applied based on prior distribution.") + else: + self.bounds = None + return - self.bounds = bounds + self.bounds = [self.lower_bound, self.upper_bound] def get_initial_value(self) -> float: """ Return the initial value of each parameter. """ if self.initial_value is None: - sample = self.rvs(1) - self.update(initial_value=sample[0]) + if self.prior is not None: + sample = self.rvs(1)[0] + self.update(initial_value=sample) + else: + warnings.warn( + "Initial value or Prior are None, proceeding without initial value.", + UserWarning, + stacklevel=2, + ) return self.initial_value @@ -191,6 +219,7 @@ def __init__(self, *args): self.param = OrderedDict() for param in args: self.add(param) + self.initial_value() def __getitem__(self, key: str) -> Parameter: """ @@ -364,12 +393,20 @@ def get_sigma0(self) -> list: for param in self.param.values(): if hasattr(param.prior, "sigma"): - sigma0.append(param.prior.sigma) + if param.transformation is None: + sigma0.append(param.prior.sigma) + else: + sigma0.append( + np.ndarray.item( + param.transformation.convert_standard_deviation( + param.prior.sigma, param.initial_value + ) + ) + ) else: all_have_sigma = False if not all_have_sigma: sigma0 = None - return sigma0 def initial_value(self) -> np.ndarray: @@ -379,10 +416,8 @@ def initial_value(self) -> np.ndarray: initial_values = [] for param in self.param.values(): - if param.initial_value is None: - initial_value = param.rvs(1)[0] - param.update(initial_value=initial_value) - initial_values.append(param.initial_value) + initial_value = param.get_initial_value() + initial_values.append(initial_value) return np.asarray(initial_values) @@ -408,6 +443,30 @@ def true_value(self) -> np.ndarray: return np.asarray(true_values) + def get_transformations(self): + """ + Get the transformations for each parameter. + """ + transformations = [] + + for param in self.param.values(): + transformations.append(param.transformation) + + return transformations + + def construct_transformation(self): + """ + Create a ComposedTransformation object from the individual parameter transformations. 
+ """ + transformations = self.get_transformations() + if not transformations or all(t is None for t in transformations): + return None + + valid_transformations = [ + t if t is not None else IdentityTransformation() for t in transformations + ] + return ComposedTransformation(valid_transformations) + def get_bounds_for_plotly(self): """ Retrieve parameter bounds in the format expected by Plotly. diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index ee36a6bb..cb3bc597 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -64,6 +64,7 @@ def __init__( self.n_outputs = len(self.signal) self._time_data = None self._target = None + self.verbose = False if isinstance(model, BaseModel): self.additional_variables = additional_variables diff --git a/pybop/problems/design_problem.py b/pybop/problems/design_problem.py index 30e2d9c3..e6a82866 100644 --- a/pybop/problems/design_problem.py +++ b/pybop/problems/design_problem.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np from pybop import BaseProblem @@ -25,7 +27,7 @@ class DesignProblem(BaseProblem): additional_variables : List[str], optional Additional variables to observe and store in the solution (default additions are: ["Time [s]", "Current [A]"]). init_soc : float, optional - Initial state of charge (default: None). + Initial state of charge (default: 1.0). """ def __init__( @@ -46,6 +48,12 @@ def __init__( additional_variables.extend(["Time [s]", "Current [A]"]) additional_variables = list(set(additional_variables)) + if init_soc is None: + if "Initial SoC" in model._parameter_set.keys(): + init_soc = model._parameter_set["Initial SoC"] + else: + init_soc = 1.0 # default value + super().__init__( parameters, model, @@ -56,26 +64,17 @@ def __init__( ) self.experiment = experiment - # Build the model if required - if experiment is not None: - # Leave the build until later to apply the experiment - self._model.classify_and_update_parameters(self.parameters) - - elif self._model._built_model is None: - self._model.build( - experiment=self.experiment, - parameters=self.parameters, - check_model=self.check_model, - init_soc=self.init_soc, - ) - # Add an example dataset for plotting comparison sol = self.evaluate(self.parameters.as_dict("initial")) self._time_data = sol["Time [s]"] self._target = {key: sol[key] for key in self.signal} self._dataset = None + self.warning_patterns = [ + "Ah is greater than", + "Non-physical point encountered", + ] - def evaluate(self, inputs: Inputs): + def evaluate(self, inputs: Inputs, update_capacity=False): """ Evaluate the model with the given parameters and return the signal. @@ -90,19 +89,33 @@ def evaluate(self, inputs: Inputs): The model output y(t) simulated with inputs. 
""" inputs = self.parameters.verify(inputs) - - sol = self._model.predict( - inputs=inputs, - experiment=self.experiment, - init_soc=self.init_soc, - ) - - if sol == [np.inf]: - return sol - - else: - predictions = {} - for signal in self.signal + self.additional_variables: - predictions[signal] = sol[signal].data + if update_capacity: + self.model.approximate_capacity(inputs) + + try: + with warnings.catch_warnings(): + for pattern in self.warning_patterns: + warnings.filterwarnings( + "error", category=UserWarning, message=pattern + ) + + sol = self._model.predict( + inputs=inputs, + experiment=self.experiment, + init_soc=self.init_soc, + ) + + # Catch infeasible solutions and return infinity + except (UserWarning, Exception) as e: + if self.verbose: + print(f"Ignoring this sample due to: {e}") + return { + signal: np.asarray(np.ones(2) * -np.inf) + for signal in [*self.signal, *self.additional_variables] + } + + predictions = {} + for signal in self.signal + self.additional_variables: + predictions[signal] = sol[signal].data return predictions diff --git a/pybop/problems/fitting_problem.py b/pybop/problems/fitting_problem.py index 58d59e9e..0ad636ce 100644 --- a/pybop/problems/fitting_problem.py +++ b/pybop/problems/fitting_problem.py @@ -79,7 +79,7 @@ def __init__( init_soc=self.init_soc, ) - def evaluate(self, inputs: Inputs): + def evaluate(self, inputs: Inputs, update_capacity=False): """ Evaluate the model with the given parameters and return the signal. diff --git a/pybop/transformation/base_transformation.py b/pybop/transformation/base_transformation.py new file mode 100644 index 00000000..c9905775 --- /dev/null +++ b/pybop/transformation/base_transformation.py @@ -0,0 +1,160 @@ +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import Union + +import numpy as np + + +class Transformation(ABC): + """ + Abstract base class for transformations between two parameter spaces: the model + parameter space and a search space. + + If `transform` is an instance of a `Transformation` class, you can apply the + transformation of a parameter vector from the model space `p` to the search + space `q` using `q = transform.to_search(p)` and the inverse using `p = transform.to_model(q)`. + + Based on pints.transformation method. + + References + ---------- + .. [1] Erik Jorgensen and Asger Roer Pedersen. "How to Obtain Those Nasty Standard Errors From Transformed Data." + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.9023 + .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. + """ + + # ---- To be implemented with Monte Carlo PR ------ # + # def convert_log_prior(self, log_prior): + # """Returns a transformed log-prior class.""" + # return TransformedLogPrior(log_prior, self) + + def convert_covariance_matrix(self, cov: np.ndarray, q: np.ndarray) -> np.ndarray: + """ + Converts a covariance matrix `covariance` from the model space to the search space + around a parameter vector `q` in the search space. + """ + jac_inv = np.linalg.pinv(self.jacobian(q)) + return jac_inv @ cov @ jac_inv.T + + def convert_standard_deviation( + self, std: Union[float, np.ndarray], q: np.ndarray + ) -> np.ndarray: + """ + Converts standard deviation `std`, either a scalar or a vector, from the model space + to the search space around a parameter vector `q` in the search space. 
+ """ + if isinstance(q, (int, float)): + q = np.asarray([q]) + jac_inv = np.linalg.pinv(self.jacobian(q)) + cov = jac_inv @ jac_inv.T + return std * np.sqrt(np.diagonal(cov)) + + @abstractmethod + def jacobian(self, q: np.ndarray) -> np.ndarray: + """Returns the Jacobian matrix of the transformation at the parameter vector `q`.""" + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, Sequence[np.ndarray]]: + """ + Computes the Jacobian matrix and its partial derivatives at the parameter vector `q`. + + Returns a tuple `(jacobian, hessian)`. + """ + raise NotImplementedError("jacobian_S1 method must be implemented if used.") + + def log_jacobian_det(self, q: np.ndarray) -> float: + """ + Returns the logarithm of the absolute value of the determinant of the Jacobian matrix + at the parameter vector `q`. + """ + raise NotImplementedError( + "log_jacobian_det method must be implemented if used." + ) + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """ + Computes the logarithm of the absolute value of the determinant of the Jacobian, + and returns it along with its partial derivatives. + """ + raise NotImplementedError( + "log_jacobian_det_S1 method must be implemented if used." + ) + + @property + def n_parameters(self): + return self._n_parameters + + def to_model(self, q: np.ndarray) -> np.ndarray: + """Transforms a parameter vector `q` from the search space to the model space.""" + return self._transform(q, method="to_model") + + def to_search(self, p: np.ndarray) -> np.ndarray: + """Transforms a parameter vector `p` from the model space to the search space.""" + return self._transform(p, method="to_search") + + @abstractmethod + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """ + Transforms a parameter vector `x` from the search space to the model space if `method` + is "to_model", or from the model space to the search space if `method` is "to_search". + """ + + def is_elementwise(self) -> bool: + """ + Returns `True` if the transformation is element-wise, meaning it can be applied + element-by-element independently. 
+ """ + raise NotImplementedError("is_elementwise method must be implemented if used.") + + def _verify_input( + self, inputs: Union[float, int, list[float], np.ndarray, dict[str, float]] + ) -> np.ndarray: + """Set and validate the transformation parameter.""" + if isinstance(inputs, (float, int)): + return np.full(self._n_parameters, float(inputs)) + + if isinstance(inputs, dict): + inputs = list(inputs.values()) + + try: + input_array = np.asarray(inputs, dtype=float) + except (ValueError, TypeError) as e: + raise TypeError( + "Transform must be a float, int, list, numpy array, or dictionary" + ) from e + + if input_array.size != self._n_parameters: + raise ValueError(f"Transform must have {self._n_parameters} elements") + + return input_array + + +# ---- To be implemented with Monte Carlo PR ------ # +# class TransformedLogPDF(BaseCost): +# """Transformed log-PDF class.""" +# def __init__(self, log_pdf, transformation): +# self._log_pdf = log_pdf +# self._transformation = transformation + +# def __call__(self, q): +# p = self._transformation.to_model(q) +# log_pdf = self._log_pdf(p) + +# # Calculate the PDF using change of variable +# # Wikipedia: https://w.wiki/UsJ +# log_jacobian_det = self._transformation.log_jacobian_det(q) +# return log_pdf + log_jacobian_det + +# def _evaluateS1(self, x): +# p = self._transformation.to_model(x) +# log_pdf, log_pdf_derivatives = self._log_pdf._evaluateS1(p) +# log_jacobian_det, log_jacobian_det_derivatives = self._transformation.log_jacobian_det_S1(x) +# return log_pdf + log_jacobian_det, log_pdf_derivatives + log_jacobian_det_derivatives + +# class TransformedLogPrior: +# """Transformed log-prior class.""" +# def __init__(self, log_prior, transformation): +# self._log_prior = log_prior +# self._transformation = transformation + +# def __call__(self, q): +# return self diff --git a/pybop/transformation/transformations.py b/pybop/transformation/transformations.py new file mode 100644 index 00000000..782fe46b --- /dev/null +++ b/pybop/transformation/transformations.py @@ -0,0 +1,364 @@ +from typing import Union + +import numpy as np + +from pybop import Transformation + + +class IdentityTransformation(Transformation): + """ + This class implements a trivial transformation where the model parameter space + and the search space are identical. It extends the base Transformation class. + + The transformation is defined as: + - to_search: y = x + - to_model: x = y + + Key properties: + 1. Jacobian: Identity matrix + 2. Log determinant of Jacobian: Always 0 + 3. Elementwise: True (each output dimension depends only on the corresponding input dimension) + + Use cases: + 1. When no transformation is needed between spaces + 2. As a placeholder in composite transformations + 3. For testing and benchmarking other transformations + + Note: While this transformation doesn't change the parameters, it still provides + all the methods required by the Transformation interface, making it useful in + scenarios where a transformation object is expected but no actual transformation + is needed. + + Initially based on pints.IdentityTransformation method. 
+ """ + + def __init__(self, n_parameters: int = 1): + self._n_parameters = n_parameters + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.eye(self._n_parameters) + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + n = self._n_parameters + return self.jacobian(q), np.zeros((n, n, n)) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return 0.0 + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), np.zeros(self._n_parameters) + + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + return np.asarray(x) + + +class ScaledTransformation(Transformation): + """ + This class implements a linear transformation between the model parameter space + and a search space, using a coefficient (scale factor) and an intercept (offset). + It extends the base Transformation class. + + The transformation is defined as: + - to_search: y = coefficient * (x + intercept) + - to_model: x = y / coefficient - intercept + + Where: + - x is in the model parameter space + - y is in the search space + - coefficient is the scaling factor + - intercept is the offset + + This transformation is useful for scaling and shifting parameters to a more + suitable range for optimisation algorithms. + + Based on pints.ScaledTransformation class. + """ + + def __init__( + self, + coefficient: Union[list, float, np.ndarray], + intercept: Union[list, float, np.ndarray] = 0, + n_parameters: int = 1, + ): + self._n_parameters = n_parameters + self._intercept = self._verify_input(intercept) + self._coefficient = self._verify_input(coefficient) + self._inverse_coeff = np.reciprocal(self._coefficient) + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.diag(self._inverse_coeff) + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + n = self._n_parameters + return self.jacobian(q), np.zeros((n, n, n)) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return np.sum(np.log(np.abs(self._coefficient))) + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), np.zeros(self._n_parameters) + + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + x = self._verify_input(x) + if method == "to_model": + return x * self._inverse_coeff - self._intercept + elif method == "to_search": + return self._coefficient * (x + self._intercept) + else: + raise ValueError(f"Unknown method: {method}") + + +class LogTransformation(Transformation): + """ + This class implements a logarithmic transformation between the model parameter space + and a search space. It extends the base Transformation class. 
+
+    The transformation is defined as:
+    - to_search: y = log(x)
+    - to_model: x = exp(y)
+
+    Where:
+    - x is in the model parameter space (strictly positive)
+    - y is in the search space (can be any real number)
+
+    This transformation is particularly useful for:
+    1. Parameters that are strictly positive and may span several orders of magnitude.
+    2. Converting multiplicative processes to additive ones in the search space.
+    3. Ensuring positivity constraints without explicit bounds in optimisation.
+
+    Note: Care should be taken when using this transformation, as it can introduce
+    bias in the parameter estimates if not accounted for properly in the likelihood
+    or cost function. Simply, E[log(x)] <= log(E[x]), as per Jensen's inequality.
+    For more information, see:
+    https://en.wikipedia.org/w/index.php?title=Jensen%27s_inequality&oldid=1212437916#Probabilistic_form
+
+    Initially based on pints.LogTransformation class.
+    """
+
+    def __init__(self, n_parameters: int = 1):
+        self._n_parameters = n_parameters
+
+    def is_elementwise(self) -> bool:
+        """See :meth:`Transformation.is_elementwise()`."""
+        return True
+
+    def jacobian(self, q: np.ndarray) -> np.ndarray:
+        """See :meth:`Transformation.jacobian()`."""
+        return np.diag(1 / q)
+
+    def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+        """See :meth:`Transformation.jacobian_S1()`."""
+        return np.diag(1 / q), np.diag(-1 / q**2)
+
+    def log_jacobian_det(self, q: np.ndarray) -> float:
+        """See :meth:`Transformation.log_jacobian_det()`."""
+        return np.sum(-np.log(q))
+
+    def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]:
+        """See :meth:`Transformation.log_jacobian_det_S1()`."""
+        return self.log_jacobian_det(q), -1 / q
+
+    def _transform(self, x: np.ndarray, method: str) -> np.ndarray:
+        """See :meth:`Transformation._transform`."""
+        x = self._verify_input(x)
+        if method == "to_model":
+            return np.exp(x)
+        elif method == "to_search":
+            return np.log(x)
+        else:
+            raise ValueError(f"Unknown method: {method}")
+
+
+class ComposedTransformation(Transformation):
+    """
+    N-dimensional Transformation composed of one or more other N_i-dimensional
+    sub-transformations, where the sum of N_i equals N.
+
+    This class allows for the composition of multiple transformations, each potentially
+    operating on a different number of dimensions. The total dimensionality of the
+    composed transformation is the sum of the dimensionalities of its components.
+
+    The dimensionality of the individual transformations does not have to be
+    the same, i.e., N_i != N_j is allowed.
+
+    Extends pybop.Transformation. Initially based on pints.ComposedTransformation class.
+    """
+
+    def __init__(self, transformations: list[Transformation]):
+        if not transformations:
+            raise ValueError("Must have at least one sub-transformation.")
+        self._transformations = []
+        self._n_parameters = 0
+        self._is_elementwise = True
+        for transformation in transformations:
+            self._append_transformation(transformation)
+
+    def _append_transformation(self, transformation: Transformation):
+        """
+        Append a transformation to the internal list of transformations.
+
+        Parameters
+        ----------
+        transformation : Transformation
+            Transformation to append.
+
+        Raises
+        ------
+        TypeError
+            If the appended object is not a Transformation.
+ """ + if not isinstance(transformation, Transformation): + raise TypeError("The appended object must be a Transformation.") + self._transformations.append(transformation) + self._n_parameters += transformation.n_parameters + self._is_elementwise = self._is_elementwise and transformation.is_elementwise() + + def append(self, transformation: Transformation): + """ + Append a new transformation to the existing composition. + + Parameters + ---------- + transformation : Transformation + The transformation to append. + """ + self._append_transformation(transformation) + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return self._is_elementwise + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """ + Compute the elementwise Jacobian of the composed transformation. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + np.ndarray + Diagonal matrix representing the elementwise Jacobian. + """ + q = self._verify_input(q) + diag = np.zeros(self._n_parameters) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + diag[lo:hi] = np.diagonal(transformation.jacobian(q[lo:hi])) + lo = hi + + return np.diag(diag) + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + q = self._verify_input(q) + output_S1 = np.zeros( + (self._n_parameters, self._n_parameters, self._n_parameters) + ) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + _, jac_S1 = transformation.jacobian_S1(q[lo:hi]) + for i, jac_S1_i in enumerate(jac_S1): + output_S1[lo + i, lo:hi, lo:hi] = jac_S1_i + lo = hi + + return self.jacobian(q), output_S1 + + def log_jacobian_det(self, q: np.ndarray) -> float: + """ + Compute the elementwise logarithm of the determinant of the Jacobian. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + float + Sum of log determinants of individual transformations. + """ + q = self._verify_input(q) + return sum( + transformation.log_jacobian_det(q[lo : lo + transformation.n_parameters]) + for lo, transformation in self._iter_transformations() + ) + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """ + Compute the elementwise logarithm of the determinant of the Jacobian and its first-order sensitivities. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + Tuple[float, np.ndarray] + Tuple of sum of log determinants and concatenated first-order sensitivities. + """ + q = self._verify_input(q) + output = 0.0 + output_S1 = np.zeros(self._n_parameters) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + j, j_S1 = transformation.log_jacobian_det_S1(q[lo:hi]) + output += j + output_S1[lo:hi] = j_S1 + lo = hi + + return output, output_S1 + + def _transform(self, data: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + data = self._verify_input(data) + output = np.zeros_like(data) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + output[lo:hi] = getattr(transformation, method)(data[lo:hi]) + lo = hi + + return output + + def _iter_transformations(self): + """ + Iterate over the transformations in the composition. + + Yields + ------ + Tuple[int, Transformation] + Tuple of starting index and transformation object for each sub-transformation. 
+ """ + lo = 0 + for transformation in self._transformations: + yield lo, transformation + lo += transformation.n_parameters diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py index 47782a03..34a6e8c6 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -104,6 +104,8 @@ def test_optimisation_f_guessed(self, f_guessed, spm_costs): assert initial_cost > final_cost else: assert initial_cost < final_cost + else: + raise ValueError("Initial value is the same as the ground truth value.") np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x, init_soc): diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 5e9d6b00..b3188e19 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -196,7 +196,6 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): # Test each optimiser optim = multi_optimiser( cost=spm_two_signal_cost, - sigma0=0.03, max_iterations=250, ) diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 98dde5cb..fbb9f6cb 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -90,6 +90,8 @@ def test_optimisers_on_simple_model(self, optimiser, cost): assert initial_cost > final_cost else: assert initial_cost < final_cost + else: + raise ValueError("Initial value is the same as the ground truth value.") np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x): diff --git a/tests/integration/test_transformation.py b/tests/integration/test_transformation.py new file mode 100644 index 00000000..4ca5a0c1 --- /dev/null +++ b/tests/integration/test_transformation.py @@ -0,0 +1,103 @@ +import numpy as np +import pytest + +import pybop + + +class TestTransformation: + """ + A class for integration tests of the transformation methods. 
+ """ + + @pytest.fixture(autouse=True) + def setup(self): + self.ground_truth = np.array([0.5, 0.2]) + np.random.normal( + loc=0.0, scale=0.05, size=2 + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet.pybamm("Chen2020") + return pybop.lithium_ion.SPMe(parameter_set=parameter_set) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.7), + bounds=[0.375, 0.725], + transformation=pybop.LogTransformation(), + ), + pybop.Parameter( + "Positive electrode conductivity [S.m-1]", + prior=pybop.Uniform(0.05, 0.45), + bounds=[0.04, 0.5], + transformation=pybop.LogTransformation(), + ), + ) + + @pytest.fixture(params=[0.4, 0.7]) + def init_soc(self, request): + return request.param + + def noise(self, sigma, values): + return np.random.normal(0, sigma, values) + + @pytest.fixture + def cost(self, model, parameters, init_soc): + # Form dataset + solution = self.get_data(model, parameters, self.ground_truth, init_soc) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data + + self.noise(0.002, len(solution["Time [s]"].data)), + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) + return pybop.RootMeanSquaredError(problem) + + @pytest.mark.parametrize( + "optimiser", + [ + pybop.AdamW, + pybop.CMAES, + ], + ) + @pytest.mark.integration + def test_spm_optimisers(self, optimiser, cost): + x0 = cost.parameters.initial_value() + optim = optimiser( + cost=cost, + max_unchanged_iterations=35, + max_iterations=125, + ) + + initial_cost = optim.cost(x0) + x, final_cost = optim.run() + + # Assertions + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + else: + raise ValueError("Initial value is the same as the ground truth value.") + + def get_data(self, model, parameters, x, init_soc): + model.parameters = parameters + experiment = pybop.Experiment( + [ + ( + "Discharge at 1C for 3 minutes (4 second period)", + "Charge at 1C for 3 minutes (4 second period)", + ), + ] + ) + sim = model.predict( + init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict(x) + ) + return sim diff --git a/tests/integration/test_weighted_cost.py b/tests/integration/test_weighted_cost.py new file mode 100644 index 00000000..e964e970 --- /dev/null +++ b/tests/integration/test_weighted_cost.py @@ -0,0 +1,185 @@ +import numpy as np +import pytest + +import pybop + + +class TestWeightedCost: + """ + A class to test the weighted cost function. 
+ """ + + @pytest.fixture(autouse=True) + def setup(self): + self.sigma0 = 0.002 + self.ground_truth = np.asarray([0.55, 0.55]) + np.random.normal( + loc=0.0, scale=0.05, size=2 + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet.pybamm("Chen2020") + return pybop.lithium_ion.SPM(parameter_set=parameter_set) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.75), + bounds=[0.375, 0.75], + ), + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.75), + # no bounds + ), + ) + + @pytest.fixture(params=[0.4]) + def init_soc(self, request): + return request.param + + @pytest.fixture( + params=[ + ( + pybop.GaussianLogLikelihoodKnownSigma, + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + pybop.MAP, + ) + ] + ) + def cost_class(self, request): + return request.param + + def noise(self, sigma, values): + return np.random.normal(0, sigma, values) + + @pytest.fixture + def weighted_fitting_cost(self, model, parameters, cost_class, init_soc): + # Form dataset + solution = self.get_data(model, parameters, self.ground_truth, init_soc) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data + + self.noise(self.sigma0, len(solution["Time [s]"].data)), + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) + costs = [] + for cost in cost_class: + if issubclass(cost, pybop.MAP): + costs.append( + cost( + problem, + pybop.GaussianLogLikelihoodKnownSigma, + sigma0=self.sigma0, + ) + ) + elif issubclass(cost, pybop.BaseLikelihood): + costs.append(cost(problem, sigma0=self.sigma0)) + else: + costs.append(cost(problem)) + + return pybop.WeightedCost(*costs, weights=[0.1, 1, 0.5, 0.6]) + + @pytest.mark.integration + def test_fitting_costs(self, weighted_fitting_cost): + x0 = weighted_fitting_cost.parameters.initial_value() + optim = pybop.CuckooSearch( + cost=weighted_fitting_cost, + sigma0=0.03, + max_iterations=250, + max_unchanged_iterations=35, + ) + + initial_cost = optim.cost(optim.parameters.initial_value()) + x, final_cost = optim.run() + + # Assertions + if not np.allclose(x0, self.ground_truth, atol=1e-5): + if optim.minimising: + assert initial_cost > final_cost + else: + assert initial_cost < final_cost + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + + @pytest.fixture( + params=[ + ( + pybop.GravimetricEnergyDensity, + pybop.VolumetricEnergyDensity, + ) + ] + ) + def design_cost(self, request): + return request.param + + @pytest.fixture + def weighted_design_cost(self, model, design_cost): + init_soc = 1.0 + parameters = pybop.Parameters( + pybop.Parameter( + "Positive electrode thickness [m]", + prior=pybop.Gaussian(5e-05, 5e-06), + bounds=[2e-06, 10e-05], + ), + pybop.Parameter( + "Negative electrode thickness [m]", + prior=pybop.Gaussian(5e-05, 5e-06), + bounds=[2e-06, 10e-05], + ), + ) + experiment = pybop.Experiment( + ["Discharge at 1C until 3.5 V (5 seconds period)"], + ) + + problem = pybop.DesignProblem( + model, parameters, experiment=experiment, init_soc=init_soc + ) + costs_update_capacity = [] + costs = [] + for cost in design_cost: + costs_update_capacity.append(cost(problem, update_capacity=True)) + costs.append(cost(problem)) + + return [ + pybop.WeightedCost(*costs, weights=[1.0, 
0.1]), + pybop.WeightedCost(*costs_update_capacity, weights=[0.1, 1.0]), + ] + + @pytest.mark.integration + @pytest.mark.parametrize("cost_index", [0, 1]) + def test_design_costs(self, weighted_design_cost, cost_index): + cost = weighted_design_cost[cost_index] + optim = pybop.CuckooSearch( + cost, + max_iterations=15, + allow_infeasible_solutions=False, + ) + initial_values = optim.parameters.initial_value() + initial_cost = optim.cost(initial_values) + x, final_cost = optim.run() + + # Assertions + assert initial_cost < final_cost + for i, _ in enumerate(x): + assert x[i] > initial_values[i] + + def get_data(self, model, parameters, x, init_soc): + model.classify_and_update_parameters(parameters) + experiment = pybop.Experiment( + [ + ( + "Discharge at 0.5C for 3 minutes (4 second period)", + "Charge at 0.5C for 3 minutes (4 second period)", + ), + ] + ) + sim = model.predict(init_soc=init_soc, experiment=experiment, inputs=x) + return sim diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index b67f5ab9..309b7e74 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -297,7 +297,7 @@ def test_design_costs(self, cost_class, design_problem): # Compute after updating nominal capacity cost = cost_class(design_problem, update_capacity=True) - assert np.isfinite(cost([0.4])) + cost([0.4]) @pytest.mark.unit def test_weighted_fitting_cost(self, problem): @@ -313,25 +313,25 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_array_equal(weighted_cost.weights, np.ones(2)) with pytest.raises( TypeError, - match=r"Received instead of cost object.", + match="All costs must be instances of BaseCost.", ): - weighted_cost = pybop.WeightedCost("Invalid string") + weighted_cost = pybop.WeightedCost(cost1.problem) with pytest.raises( - TypeError, - match="Expected a list or array of weights the same length as costs.", + ValueError, + match="Weights must be numeric values.", ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights="Invalid string") with pytest.raises( ValueError, - match="Expected a list or array of weights the same length as costs.", + match="Number of weights must match number of costs.", ): weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1]) - # Test with and without different problems + # Test with identical problems weight = 100 weighted_cost_2 = pybop.WeightedCost(cost1, cost2, weights=[1, weight]) - assert weighted_cost_2._different_problems is False - assert weighted_cost_2._fixed_problem is True + assert weighted_cost_2.has_identical_problems is True + assert weighted_cost_2.has_separable_problem is False assert weighted_cost_2.problem is problem assert weighted_cost_2([0.5]) >= 0 np.testing.assert_allclose( @@ -340,10 +340,11 @@ def test_weighted_fitting_cost(self, problem): atol=1e-5, ) + # Test with different problems cost3 = pybop.RootMeanSquaredError(copy(problem)) weighted_cost_3 = pybop.WeightedCost(cost1, cost3, weights=[1, weight]) - assert weighted_cost_3._different_problems is True - assert weighted_cost_3._fixed_problem is False + assert weighted_cost_3.has_identical_problems is False + assert weighted_cost_3.has_separable_problem is False assert weighted_cost_3.problem is None assert weighted_cost_3([0.5]) >= 0 np.testing.assert_allclose( @@ -357,15 +358,69 @@ def test_weighted_fitting_cost(self, problem): np.testing.assert_allclose(errors_2, errors_3, atol=1e-5) np.testing.assert_allclose(sensitivities_2, sensitivities_3, atol=1e-5) + # Test MAP explicitly + cost4 = pybop.MAP(problem, 
pybop.GaussianLogLikelihood) + weighted_cost_4 = pybop.WeightedCost(cost1, cost4, weights=[1, weight]) + assert weighted_cost_4.has_identical_problems is False + assert weighted_cost_4.has_separable_problem is False + sigma = 0.05 + assert weighted_cost_4([0.5, sigma]) <= 0 + np.testing.assert_allclose( + weighted_cost_4.evaluate([0.6, sigma]), + cost1([0.6, sigma]) + weight * cost4([0.6, sigma]), + atol=1e-5, + ) + @pytest.mark.unit def test_weighted_design_cost(self, design_problem): cost1 = pybop.GravimetricEnergyDensity(design_problem) - cost2 = pybop.RootMeanSquaredError(design_problem) + cost2 = pybop.VolumetricEnergyDensity(design_problem) - # Test with and without weights + # Test DesignCosts with identical problems weighted_cost = pybop.WeightedCost(cost1, cost2) - assert weighted_cost._different_problems is False - assert weighted_cost._fixed_problem is False + assert weighted_cost.has_identical_problems is True + assert weighted_cost.has_separable_problem is False + assert weighted_cost.problem is design_problem + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) + + # Test DesignCosts with different problems + cost3 = pybop.VolumetricEnergyDensity(copy(design_problem)) + weighted_cost = pybop.WeightedCost(cost1, cost3) + assert weighted_cost.has_identical_problems is False + assert weighted_cost.has_separable_problem is False + for i, _ in enumerate(weighted_cost.costs): + assert isinstance(weighted_cost.costs[i].problem, pybop.DesignProblem) + + # Ensure attributes are set correctly and not modified via side-effects + assert weighted_cost.update_capacity is False + assert weighted_cost.costs[0].update_capacity is False + assert weighted_cost.costs[1].update_capacity is False + + assert weighted_cost([0.5]) >= 0 + np.testing.assert_allclose( + weighted_cost.evaluate([0.6]), + cost1([0.6]) + cost2([0.6]), + atol=1e-5, + ) + + @pytest.mark.unit + def test_weighted_design_cost_with_update_capacity(self, design_problem): + cost1 = pybop.GravimetricEnergyDensity(design_problem, update_capacity=True) + cost2 = pybop.VolumetricEnergyDensity(design_problem) + weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1]) + + # Ensure attributes are set correctly and not modified via side-effects + assert weighted_cost.update_capacity is True + assert weighted_cost.costs[0].update_capacity is True + assert weighted_cost.costs[1].update_capacity is False + + assert weighted_cost.has_identical_problems is True + assert weighted_cost.has_separable_problem is False assert weighted_cost.problem is design_problem assert weighted_cost([0.5]) >= 0 np.testing.assert_allclose( @@ -373,3 +428,13 @@ def test_weighted_design_cost(self, design_problem): cost1([0.6]) + cost2([0.6]), atol=1e-5, ) + + @pytest.mark.unit + def test_mixed_problem_classes(self, problem, design_problem): + cost1 = pybop.SumSquaredError(problem) + cost2 = pybop.GravimetricEnergyDensity(design_problem) + with pytest.raises( + TypeError, + match="All problems must be of the same class type.", + ): + pybop.WeightedCost(cost1, cost2) diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index aa68cc0e..56ae1e44 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -85,7 +85,7 @@ def test_base_likelihood_call_raises_not_implemented_error( ): likelihood = pybop.BaseLikelihood(one_signal_problem) with pytest.raises(NotImplementedError): - likelihood(np.array([0.5, 0.5])) + 
            likelihood(np.array([0.5]))
 
     @pytest.mark.unit
     def test_likelihood_check_sigma0(self, one_signal_problem):
@@ -128,8 +128,8 @@ def test_gaussian_log_likelihood_known_sigma(self, problem_name, request):
 
     @pytest.mark.unit
     def test_gaussian_log_likelihood(self, one_signal_problem):
         likelihood = pybop.GaussianLogLikelihood(one_signal_problem)
-        result = likelihood(np.array([0.5, 0.5]))
-        grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5]))
+        result = likelihood(np.array([0.8, 0.2]))
+        grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.8, 0.2]))
         assert isinstance(result, float)
         np.testing.assert_allclose(result, grad_result, atol=1e-5)
         assert np.all(grad_likelihood <= 0)
diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py
index 348edb4a..7c906764 100644
--- a/tests/unit/test_models.py
+++ b/tests/unit/test_models.py
@@ -1,3 +1,6 @@
+import sys
+from io import StringIO
+
 import numpy as np
 import pybamm
 import pytest
@@ -85,7 +88,10 @@ def test_non_default_solver(self):
     def test_predict_without_pybamm(self, model):
         model._unprocessed_model = None
-        with pytest.raises(ValueError):
+        with pytest.raises(
+            ValueError,
+            match="The predict method currently only supports PyBaMM models.",
+        ):
             model.predict(None, None)
 
     @pytest.mark.unit
@@ -113,6 +119,12 @@ def test_predict_with_inputs(self, model):
         res = model.predict(t_eval=t_eval, inputs=inputs)
         assert len(res["Voltage [V]"].data) == 100
 
+        with pytest.raises(
+            ValueError,
+            match="The predict method requires either an experiment or t_eval to be specified.",
+        ):
+            model.predict(inputs=inputs)
+
     @pytest.mark.unit
     def test_predict_without_allow_infeasible_solutions(self, model):
         if isinstance(model, (pybop.lithium_ion.SPM, pybop.lithium_ion.SPMe)):
@@ -220,6 +232,12 @@ def test_rebuild_geometric_parameters(self):
         t_eval = np.linspace(0, 100, 100)
         out_init = initial_built_model.predict(t_eval=t_eval)
 
+        with pytest.raises(
+            ValueError,
+            match="Cannot use sensitivies for parameters which require a model rebuild",
+        ):
+            model.simulateS1(t_eval=t_eval, inputs=parameters.as_dict())
+
         # Test that the model can be rebuilt with different geometric parameters
         parameters["Positive particle radius [m]"].update(5e-06)
         parameters["Negative electrode thickness [m]"].update(45e-06)
@@ -329,6 +347,9 @@ def test_thevenin_model(self):
             == model.pybamm_model.default_parameter_values["Open-circuit voltage [V]"]
         )
 
+        model.predict(init_soc=0.5, t_eval=np.arange(0, 10, 5))
+        assert model._parameter_set["Initial SoC"] == 0.5
+
     @pytest.mark.unit
     def test_check_params(self):
         base = pybop.BaseModel()
@@ -402,3 +423,24 @@ def test_non_converged_solution(self):
         for key in problem.signal:
             assert np.allclose(output.get(key, [])[0], output.get(key, []))
             assert np.allclose(output_S1.get(key, [])[0], output_S1.get(key, []))
+
+    @pytest.mark.unit
+    def test_get_parameter_info(self, model):
+        if isinstance(model, pybop.empirical.Thevenin):
+            # Test at least one model without a built pybamm model
+            model = pybop.empirical.Thevenin(build=False)
+
+        parameter_info = model.get_parameter_info()
+        assert isinstance(parameter_info, dict)
+
+        captured_output = StringIO()
+        sys.stdout = captured_output
+
+        model.get_parameter_info(print_info=True)
+        sys.stdout = sys.__stdout__
+
+        printed_message = captured_output.getvalue().strip()
+
+        for key, value in parameter_info.items():
+            assert key in printed_message
+            assert value in printed_message
diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py
index 90c43622..e48cda8e 100644
--- a/tests/unit/test_parameters.py
+++ b/tests/unit/test_parameters.py
@@ -1,3 +1,4 @@
+import numpy as np
 import pytest
 
 import pybop
@@ -212,3 +213,19 @@ def test_get_sigma(self, parameter):
         parameter.prior = None
 
         assert params.get_sigma0() is None
+
+    @pytest.mark.unit
+    def test_initial_values_without_attributes(self):
+        # Test without initial values
+        parameter = pybop.Parameters(
+            pybop.Parameter(
+                "Negative electrode conductivity [S.m-1]",
+            )
+        )
+        with pytest.warns(
+            UserWarning,
+            match="Initial value and prior are both None, proceeding without an initial value.",
+        ):
+            sample = parameter.initial_value()
+
+        np.testing.assert_equal(sample, np.array([None]))
diff --git a/tests/unit/test_problem.py b/tests/unit/test_problem.py
index c2c40a03..b7660cfc 100644
--- a/tests/unit/test_problem.py
+++ b/tests/unit/test_problem.py
@@ -171,11 +171,18 @@ def test_design_problem(self, parameters, experiment, model):
         assert (
             problem._model._built_model is None
         )  # building postponed with input experiment
+        assert problem.init_soc == 1.0
 
         # Test model.predict
         model.predict(inputs=[1e-5, 1e-5], experiment=experiment)
         model.predict(inputs=[3e-5, 3e-5], experiment=experiment)
 
+        # Test init_soc from parameter_set
+        model = pybop.empirical.Thevenin()
+        model._parameter_set["Initial SoC"] = 0.8
+        problem = pybop.DesignProblem(model, pybop.Parameters(), experiment)
+        assert problem.init_soc == 0.8
+
     @pytest.mark.unit
     def test_problem_construct_with_model_predict(
         self, parameters, model, dataset, signal
diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py
new file mode 100644
index 00000000..9a86d1f0
--- /dev/null
+++ b/tests/unit/test_transformations.py
@@ -0,0 +1,231 @@
+import inspect
+
+import numpy as np
+import pytest
+
+import pybop
+
+
+class TestTransformation:
+    """
+    A class to test the transformations.
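+
+    Exercises the identity, scaled, log and composed transformations,
+    including their Jacobians, log-determinants and covariance conversion.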
+ """ + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Identity", + transformation=pybop.IdentityTransformation(), + ), + pybop.Parameter( + "Scaled", + transformation=pybop.ScaledTransformation(coefficient=2.0, intercept=1), + ), + pybop.Parameter( + "Log", + transformation=pybop.LogTransformation(), + ), + ) + + @pytest.mark.unit + def test_identity_transformation(self, parameters): + q = np.asarray([5.0]) + transformation = parameters["Identity"].transformation + assert np.array_equal(transformation.to_model(q), q) + assert np.array_equal(transformation.to_search(q), q) + assert transformation.log_jacobian_det(q) == 0.0 + assert transformation.log_jacobian_det_S1(q) == (0.0, np.zeros(1)) + assert transformation.n_parameters == 1 + assert transformation.is_elementwise() + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.eye(1)) + assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + + # Test covariance transformation + cov = np.array([[0.5]]) + q = np.array([5.0]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov) + + @pytest.mark.unit + def test_scaled_transformation(self, parameters): + q = np.asarray([2.5]) + transformation = parameters["Scaled"].transformation + p = transformation.to_model(q) + assert np.allclose(p, (q / 2.0) - 1.0) + assert transformation.n_parameters == 1 + assert transformation.is_elementwise() + + q_transformed = transformation.to_search(p) + assert np.allclose(q_transformed, q) + assert np.allclose( + transformation.log_jacobian_det(q), np.sum(np.log(np.abs(2.0))) + ) + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == np.sum(np.log(np.abs(2.0))) + assert log_jac_det_S1[1] == np.zeros(1) + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.diag([0.5])) + assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + + # Test covariance transformation + cov = np.array([[0.5]]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov * transformation._coefficient**2) + + # Test incorrect transform + with pytest.raises(ValueError, match="Unknown method:"): + transformation._transform(q, "bad-string") + + @pytest.mark.unit + def test_log_transformation(self, parameters): + q = np.asarray([10]) + transformation = parameters["Log"].transformation + p = transformation.to_model(q) + assert np.allclose(p, np.exp(q)) + assert transformation.n_parameters == 1 + + q_transformed = transformation.to_search(p) + assert np.allclose(q_transformed, q) + assert np.allclose(transformation.log_jacobian_det(q), -np.sum(np.log(q))) + + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == -np.sum(np.log(q)) + assert log_jac_det_S1[1] == -1 / q + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.diag(1 / q)) + assert np.array_equal(jac_S1, np.diag(-1 / q**2)) + + # Test covariance transformation + cov = np.array([[0.5]]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov * q**2) + + # Test incorrect transform + with pytest.raises(ValueError, match="Unknown method:"): + transformation._transform(q, "bad-string") + + @pytest.mark.unit + def test_composed_transformation(self, parameters): + # Test elementwise transformations + transformation = pybop.ComposedTransformation( + [parameters["Identity"].transformation, 
parameters["Scaled"].transformation] + ) + # Test appending transformations + transformation.append(parameters["Log"].transformation) + assert transformation.is_elementwise() is True + + q = np.asarray([5.0, 2.5, 10]) + p = transformation.to_model(q) + np.testing.assert_allclose( + p, np.asarray([5.0, ((q[1] / 2.0) - 1.0), np.exp(q[2])]) + ) + jac = transformation.jacobian(q) + jac_S1 = transformation.jacobian_S1(q) + np.testing.assert_allclose( + jac, np.asarray([[1, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.1]]) + ) + np.testing.assert_allclose(jac_S1[0], jac) + assert jac_S1[1].shape == (3, 3, 3) + assert jac_S1[1][2, 2, 2] == -0.01 + np.testing.assert_allclose(jac_S1[1][0, :, :], np.zeros((3, 3))) + np.testing.assert_allclose(jac_S1[1][1, :, :], np.zeros((3, 3))) + + correct_output = np.sum(np.log(np.abs(2.0))) + np.sum(-np.log(10)) + log_jac_det = transformation.log_jacobian_det(q) + assert log_jac_det == correct_output + + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == correct_output + np.testing.assert_allclose(log_jac_det_S1[1], np.asarray([0.0, 0.0, -0.1])) + + # Test composed with no transformations + with pytest.raises( + ValueError, match="Must have at least one sub-transformation." + ): + pybop.ComposedTransformation([]) + + # Test incorrect append object + with pytest.raises( + TypeError, match="The appended object must be a Transformation." + ): + pybop.ComposedTransformation( + [parameters["Identity"].transformation, "string"] + ) + + @pytest.mark.unit + def test_verify_input(self, parameters): + q = np.asarray([5.0]) + q_dict = {"Identity": q[0]} + transformation = parameters["Scaled"].transformation + assert transformation._verify_input(q) == q + assert transformation._verify_input(q.tolist()) == q + assert transformation._verify_input(q_dict) == q + + with pytest.raises( + TypeError, match="Transform must be a float, int, list, numpy array," + ): + transformation._verify_input("string") + + with pytest.raises(ValueError, match="Transform must have"): + transformation._verify_input(np.array([1.0, 2.0, 3.0])) + + +class TestBaseTransformation: + """ + A class to test the abstract base transformation class. 
+ """ + + @pytest.fixture + def ConcreteTransformation(self): + class ConcreteTransformation(pybop.Transformation): + def jacobian(self, q): + pass + + def _transform(self, q, method): + pass + + return ConcreteTransformation() + + @pytest.mark.unit + def test_abstract_base_transformation(self): + with pytest.raises(TypeError): + pybop.Transformation() + + @pytest.mark.unit + def test_abstract_methods(self): + abstract_methods = ["jacobian", "_transform"] + for method in abstract_methods: + assert hasattr(pybop.Transformation, method) + assert getattr(pybop.Transformation, method).__isabstractmethod__ + + @pytest.mark.unit + def test_concrete_methods(self): + concrete_methods = [ + "convert_covariance_matrix", + "convert_standard_deviation", + "log_jacobian_det", + "to_model", + "to_search", + ] + for method in concrete_methods: + assert hasattr(pybop.Transformation, method) + assert not inspect.isabstract(getattr(pybop.Transformation, method)) + + @pytest.mark.unit + def test_not_implemented_methods(self, ConcreteTransformation): + not_implemented_methods = [ + "jacobian_S1", + "log_jacobian_det_S1", + ] + for method_name in not_implemented_methods: + with pytest.raises(NotImplementedError): + method = getattr(ConcreteTransformation, method_name) + method(np.array([1.0])) + + with pytest.raises(NotImplementedError): + ConcreteTransformation.is_elementwise()