diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80ed158f..567f88c8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#357](https://github.com/pybop-team/PyBOP/pull/357) - Adds the `Transformation()` base class with `IdentityTransformation()`, `ScaledTransformation()`, `LogTransformation()`, and `ComposedTransformation()` implementations, with corresponding examples and tests.
 - [#427](https://github.com/pybop-team/PyBOP/issues/427) - Adds the nbstripout pre-commit hook to remove unnecessary metadata from notebooks.
 - [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script.
 - [#393](https://github.com/pybop-team/PyBOP/pull/383) - Adds Minkowski and SumofPower cost classes, with an example and corresponding tests.
diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py
index ed38144a..4376fb46 100644
--- a/examples/scripts/spm_CMAES.py
+++ b/examples/scripts/spm_CMAES.py
@@ -13,12 +13,14 @@
         prior=pybop.Gaussian(6e-06, 0.1e-6),
         bounds=[1e-6, 9e-6],
         true_value=parameter_set["Negative particle radius [m]"],
+        transformation=pybop.LogTransformation(),
     ),
     pybop.Parameter(
         "Positive particle radius [m]",
         prior=pybop.Gaussian(4.5e-06, 0.1e-6),
         bounds=[1e-6, 9e-6],
         true_value=parameter_set["Positive particle radius [m]"],
+        transformation=pybop.LogTransformation(),
     ),
 )

@@ -42,7 +44,7 @@
 # Generate problem, cost function, and optimisation class
 problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
 cost = pybop.SumSquaredError(problem)
-optim = pybop.CMAES(cost, max_iterations=100)
+optim = pybop.CMAES(cost, sigma0=0.25, max_unchanged_iterations=20, max_iterations=50)

 # Run the optimisation
 x, final_cost = optim.run()
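As context for the example script change above, a minimal sketch of what attaching a `LogTransformation` to a `Parameter` does (the values are illustrative): the optimiser searches in log-space while the model still receives model-space values.

```python
import pybop

# Hedged sketch: a parameter declared as in the updated spm_CMAES.py example.
parameter = pybop.Parameter(
    "Negative particle radius [m]",
    prior=pybop.Gaussian(6e-06, 0.1e-6),
    bounds=[1e-6, 9e-6],
    transformation=pybop.LogTransformation(),
)

transform = parameter.transformation
q = transform.to_search(6e-06)  # model -> search space: log(6e-06) ~= -12.02
p = transform.to_model(q)       # search -> model space: exp(q) == 6e-06
```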
diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py
index 99917f3f..1103ea89 100644
--- a/examples/standalone/cost.py
+++ b/examples/standalone/cost.py
@@ -21,7 +21,7 @@ class StandaloneCost(pybop.BaseCost):

     Methods
     -------
-    __call__(x, grad=None)
+    __call__(x)
         Calculate the cost for a given parameter value.
     """

@@ -43,7 +43,7 @@ def __init__(self, problem=None):
         )
         self.x0 = self.parameters.initial_value()

-    def _evaluate(self, inputs, grad=None):
+    def _evaluate(self, inputs):
         """
         Calculate the cost for a given parameter value.

@@ -54,9 +54,6 @@ def _evaluate(self, inputs):
         ----------
         inputs : Dict
             The parameters for which to evaluate the cost.
-        grad : array-like, optional
-            Unused parameter, present for compatibility with gradient-based
-            optimizers.

         Returns
         -------
diff --git a/pybop/__init__.py b/pybop/__init__.py
index 922a6480..ff8c08a8 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -55,6 +55,17 @@
 #
 from ._dataset import Dataset

+#
+# Transformation classes
+#
+from .transformation.base_transformation import Transformation
+from .transformation.transformations import (
+    IdentityTransformation,
+    ScaledTransformation,
+    LogTransformation,
+    ComposedTransformation,
+)
+
 #
 # Parameter classes
 #
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 1f96e2fb..3b0a3631 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -39,7 +39,7 @@ def __init__(self, problem: BaseProblem, sigma0: Union[list[float], float]):
         self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi * self.sigma2)
         self._multip = -1 / (2.0 * self.sigma2)

-    def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float:
+    def _evaluate(self, inputs: Inputs) -> float:
         """
         Evaluates the Gaussian log-likelihood for the given parameters with known sigma.
         """
@@ -175,7 +175,7 @@ def dsigma_scale(self, new_value):
             raise ValueError("dsigma_scale must be non-negative")
         self._dsigma_scale = new_value

-    def _evaluate(self, inputs: Inputs, grad: Union[None, np.ndarray] = None) -> float:
+    def _evaluate(self, inputs: Inputs) -> float:
         """
         Evaluates the Gaussian log-likelihood for the given parameters.

@@ -296,7 +296,7 @@ def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-3):
         ):
             raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood")

-    def _evaluate(self, inputs: Inputs, grad=None) -> float:
+    def _evaluate(self, inputs: Inputs) -> float:
         """
         Calculate the maximum a posteriori cost for a given set of parameters.

@@ -304,9 +304,6 @@ def _evaluate(self, inputs: Inputs) -> float:
         ----------
         inputs : Inputs
             The parameters for which to evaluate the cost.
-        grad : array-like, optional
-            An array to store the gradient of the cost function with respect
-            to the parameters.

         Returns
         -------
diff --git a/pybop/costs/_weighted_cost.py b/pybop/costs/_weighted_cost.py
index effa5a51..216e0303 100644
--- a/pybop/costs/_weighted_cost.py
+++ b/pybop/costs/_weighted_cost.py
@@ -60,7 +60,7 @@ def __init__(self, *args, weights: Optional[list[float]] = None):
         for cost in self.costs:
             self.parameters.join(cost.parameters)

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Calculate the weighted cost for a given set of parameters.

@@ -68,9 +68,6 @@ def _evaluate(self, inputs: Inputs):
         ----------
         inputs : Inputs
             The parameters for which to compute the cost.
-        grad : array-like, optional
-            An array to store the gradient of the cost function with respect
-            to the parameters.

         Returns
         -------
@@ -90,7 +87,7 @@ def _evaluate(self, inputs: Inputs):
                 cost._current_prediction = cost.problem.evaluate(inputs)
             else:
                 cost._current_prediction = self._current_prediction
-            e[i] = cost._evaluate(inputs, grad)
+            e[i] = cost._evaluate(inputs)

         return np.dot(e, self.weights)
diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index eedbbc2c..116104b2 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -32,6 +32,7 @@ class BaseCost:

     def __init__(self, problem: Optional[BaseProblem] = None):
         self.parameters = Parameters()
+        self.transformation = None
         self.problem = problem
         self._fixed_problem = False
         self.set_fail_gradient()
@@ -40,19 +41,20 @@ def __init__(self, problem: Optional[BaseProblem] = None):
             self.parameters.join(self.problem.parameters)
             self.n_outputs = self.problem.n_outputs
             self.signal = self.problem.signal
+            self.transformation = self.parameters.construct_transformation()
             self._fixed_problem = True

     @property
     def n_parameters(self):
         return len(self.parameters)

-    def __call__(self, inputs: Union[Inputs, list], grad=None):
+    def __call__(self, inputs: Union[Inputs, list]):
         """
         Call the evaluate function for a given set of parameters.
         """
-        return self.evaluate(inputs, grad)
+        return self.evaluate(inputs)

-    def evaluate(self, inputs: Union[Inputs, list], grad=None):
+    def evaluate(self, inputs: Union[Inputs, list]):
         """
         Call the evaluate function for a given set of parameters.

@@ -60,9 +62,6 @@ def evaluate(self, inputs: Union[Inputs, list]):
         ----------
         inputs : Inputs or array-like
             The parameters for which to compute the cost and gradient.
-        grad : array-like, optional
-            An array to store the gradient of the cost function with respect
-            to the parameters.

         Returns
         -------
@@ -74,13 +73,15 @@ def evaluate(self, inputs: Union[Inputs, list]):
             ValueError
                 If an error occurs during the calculation of the cost.
         """
-        inputs = self.parameters.verify(inputs)
+        if self.transformation:
+            inputs = self.transformation.to_model(inputs)
+        inputs = self.parameters.verify(inputs)

         try:
             if self._fixed_problem:
                 self._current_prediction = self.problem.evaluate(inputs)
-            return self._evaluate(inputs, grad)
+            return self._evaluate(inputs)

         except NotImplementedError as e:
             raise e
@@ -88,7 +89,7 @@ def evaluate(self, inputs: Union[Inputs, list]):
         except Exception as e:
             raise ValueError(f"Error in cost calculation: {e}") from e

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Calculate the cost function value for a given set of parameters.

@@ -98,9 +99,6 @@ def _evaluate(self, inputs: Inputs):
         ----------
         inputs : Inputs
             The parameters for which to evaluate the cost.
-        grad : array-like, optional
-            An array to store the gradient of the cost function with respect
-            to the parameters.

         Returns
         -------
@@ -134,7 +132,9 @@ def evaluateS1(self, inputs: Union[Inputs, list]):
             ValueError
                 If an error occurs during the calculation of the cost or gradient.
         """
-        inputs = self.parameters.verify(inputs)
+        if self.transformation:
+            inputs = self.transformation.to_model(inputs)
+        inputs = self.parameters.verify(inputs)

         try:
             if self._fixed_problem:
diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py
index 738dfe61..1931bd33 100644
--- a/pybop/costs/design_costs.py
+++ b/pybop/costs/design_costs.py
@@ -65,7 +65,7 @@ def update_simulation_data(self, inputs: Inputs):
         self.problem._target = {key: solution[key] for key in self.problem.signal}
         self.dt = solution["Time [s]"][1] - solution["Time [s]"][0]

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Computes the value of the cost function.

@@ -75,8 +75,6 @@ def _evaluate(self, inputs: Inputs):
         ----------
         inputs : Inputs
             The parameters for which to compute the cost.
-        grad : array, optional
-            Gradient information, not used in this method.

         Raises
         ------
@@ -100,7 +98,7 @@ def __init__(self, problem, update_capacity=False):
         super().__init__(problem, update_capacity)
         self._fixed_problem = False  # keep problem evaluation within _evaluate

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Computes the cost function for the energy density.

@@ -108,8 +106,6 @@ def _evaluate(self, inputs: Inputs):
         ----------
         inputs : Inputs
             The parameters for which to compute the cost.
-        grad : array, optional
-            Gradient information, not used in this method.

         Returns
         -------
@@ -157,7 +153,7 @@ def __init__(self, problem, update_capacity=False):
         super().__init__(problem, update_capacity)
         self._fixed_problem = False  # keep problem evaluation within _evaluate

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Computes the cost function for the energy density.

@@ -165,8 +161,6 @@ def _evaluate(self, inputs: Inputs):
         ----------
         inputs : Inputs
             The parameters for which to compute the cost.
-        grad : array, optional
-            Gradient information, not used in this method.

         Returns
         -------
diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py
index 18cc752d..ce68a9ca 100644
--- a/pybop/costs/fitting_costs.py
+++ b/pybop/costs/fitting_costs.py
@@ -20,7 +20,7 @@ class RootMeanSquaredError(BaseCost):
     def __init__(self, problem):
         super().__init__(problem)

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Calculate the root mean square error for a given set of parameters.

@@ -115,7 +115,7 @@ class SumSquaredError(BaseCost):
     def __init__(self, problem):
         super().__init__(problem)

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Calculate the sum of squared errors for a given set of parameters.

@@ -123,9 +123,6 @@ def _evaluate(self, inputs: Inputs):
         ----------
         inputs : Inputs
             The parameters for which to evaluate the cost.
-        grad : array-like, optional
-            An array to store the gradient of the cost function with respect
-            to the parameters.

         Returns
         -------
@@ -418,7 +415,7 @@ def __init__(self, observer: Observer):
         self._observer = observer
         self._fixed_problem = False  # keep problem evaluation within _evaluate

-    def _evaluate(self, inputs: Inputs, grad=None):
+    def _evaluate(self, inputs: Inputs):
         """
         Calculate the observer cost for a given set of parameters.
diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py
index 2b00b234..ba749a0e 100644
--- a/pybop/optimisers/base_optimiser.py
+++ b/pybop/optimisers/base_optimiser.py
@@ -58,6 +58,7 @@ def __init__(
         self.verbose = False
         self.log = dict(x=[], x_best=[], cost=[])
         self.minimising = True
+        self.transformation = None
         self.physical_viability = False
         self.allow_infeasible_solutions = False
         self.default_max_iterations = 1000
@@ -65,6 +66,7 @@ def __init__(

         if isinstance(cost, BaseCost):
             self.cost = cost
+            self.transformation = self.cost.transformation
             self.parameters.join(cost.parameters)
             self.set_allow_infeasible_solutions()
             if isinstance(cost, (BaseLikelihood, DesignCost)):
@@ -131,6 +133,9 @@ def set_base_options(self):
         # Set other options
         self.verbose = self.unset_options.pop("verbose", self.verbose)
         self.minimising = self.unset_options.pop("minimising", self.minimising)
+        self.transformation = self.unset_options.pop(
+            "transformation", self.transformation
+        )
         if "allow_infeasible_solutions" in self.unset_options.keys():
             self.set_allow_infeasible_solutions(
                 self.unset_options.pop("allow_infeasible_solutions")
diff --git a/pybop/optimisers/base_pints_optimiser.py b/pybop/optimisers/base_pints_optimiser.py
index f5698c8e..8bf856c4 100644
--- a/pybop/optimisers/base_pints_optimiser.py
+++ b/pybop/optimisers/base_pints_optimiser.py
@@ -48,9 +48,6 @@ def __init__(self, cost, pints_optimiser, **optimiser_kwargs):
         self._evaluations = None
         self._iterations = None

-        # PyBOP doesn't currently support the PINTS transformation class
-        self._transformation = None
-
         self.pints_optimiser = pints_optimiser
         super().__init__(cost, **optimiser_kwargs)

@@ -200,8 +197,8 @@ def f(x):
                     return (L, dl) if self.minimising else (-L, -dl)
         else:

-            def f(x, grad=None):
-                return self.cost(x, grad) if self.minimising else -self.cost(x, grad)
+            def f(x):
+                return self.cost(x) if self.minimising else -self.cost(x)

         # Create evaluator object
         if self._parallel:
@@ -325,8 +322,8 @@ def f(x, grad=None):
             # Show current parameters
             x_user = self.pints_optimiser.x_guessed()
-            if self._transformation is not None:
-                x_user = self._transformation.to_model(x_user)
+            if self.transformation is not None:
+                x_user = self.transformation.to_model(x_user)
             for p in x_user:
                 print(PintsStrFloat(p))
             print("-" * 40)
@@ -348,8 +345,8 @@ def f(x, grad=None):
         f = self.pints_optimiser.f_best()

         # Inverse transform search parameters
-        if self._transformation is not None:
-            x = self._transformation.to_model(x)
+        if self.transformation is not None:
+            x = self.transformation.to_model(x)

         return Result(
             x=x, final_cost=f if self.minimising else -f, n_iterations=self._iterations
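To illustrate the wrapper pattern above (a sketch only; the toy cost and grid search are hypothetical stand-ins for the PINTS machinery): the optimiser works on `q` in search space, and the final result is inverse-transformed via `to_model` before being reported.

```python
import numpy as np
import pybop

transform = pybop.LogTransformation()

def cost_in_model_space(p):
    return float((p - 2.0) ** 2)  # toy cost with its minimum at p = 2

def f(q):
    # Search-space view of the cost, mirroring the f(x) wrapper in the diff
    return cost_in_model_space(transform.to_model(np.atleast_1d(q)))

q_grid = np.linspace(-1.0, 2.0, 201)                # candidate points in log-space
q_best = q_grid[np.argmin([f(q) for q in q_grid])]
x_best = transform.to_model(np.atleast_1d(q_best))  # report in model space, ~2.0
```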
diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py
index 2614027c..cb1a1936 100644
--- a/pybop/parameters/parameter.py
+++ b/pybop/parameters/parameter.py
@@ -4,6 +4,7 @@

 import numpy as np

+from pybop import ComposedTransformation, IdentityTransformation
 from pybop._utils import is_numeric

 Inputs = dict[str, float]
@@ -37,7 +38,13 @@ class Parameter:
     """

     def __init__(
-        self, name, initial_value=None, true_value=None, prior=None, bounds=None
+        self,
+        name,
+        initial_value=None,
+        true_value=None,
+        prior=None,
+        bounds=None,
+        transformation=None,
     ):
         """
         Construct the parameter class with a name, initial value, prior, and bounds.
@@ -47,6 +54,7 @@ def __init__(
         self.true_value = true_value
         self.initial_value = initial_value
         self.value = initial_value
+        self.transformation = transformation
         self.applied_prior_bounds = False
         self.set_bounds(bounds)
         self.margin = 1e-4
@@ -70,6 +78,9 @@ def rvs(self, n_samples: int = 1, random_state=None):
         """
         samples = self.prior.rvs(n_samples, random_state=random_state)

+        if self.transformation is not None:
+            samples = self.transformation.to_search(samples)
+
         # Constrain samples to be within bounds
         if self.bounds is not None:
             offset = self.margin * (self.upper_bound - self.lower_bound)
@@ -151,17 +162,27 @@ def set_bounds(self, bounds=None, boundary_multiplier=6):
         if bounds is not None:
             if bounds[0] >= bounds[1]:
                 raise ValueError("Lower bound must be less than upper bound")
+            elif self.transformation is not None:
+                self.lower_bound = np.ndarray.item(
+                    self.transformation.to_search(bounds[0])
+                )
+                self.upper_bound = np.ndarray.item(
+                    self.transformation.to_search(bounds[1])
+                )
             else:
                 self.lower_bound = bounds[0]
                 self.upper_bound = bounds[1]
+
         elif self.prior is not None:
             self.applied_prior_bounds = True
             self.lower_bound = self.prior.mean - boundary_multiplier * self.prior.sigma
             self.upper_bound = self.prior.mean + boundary_multiplier * self.prior.sigma
-            bounds = [self.lower_bound, self.upper_bound]
             print("Default bounds applied based on prior distribution.")
+        else:
+            self.bounds = None
+            return

-        self.bounds = bounds
+        self.bounds = [self.lower_bound, self.upper_bound]

     def get_initial_value(self) -> float:
         """
@@ -191,6 +212,7 @@ def __init__(self, *args):
         self.param = OrderedDict()
         for param in args:
             self.add(param)
+        self.initial_value()

     def __getitem__(self, key: str) -> Parameter:
         """
@@ -364,12 +386,20 @@ def get_sigma0(self) -> list:

         for param in self.param.values():
             if hasattr(param.prior, "sigma"):
-                sigma0.append(param.prior.sigma)
+                if param.transformation is None:
+                    sigma0.append(param.prior.sigma)
+                else:
+                    sigma0.append(
+                        np.ndarray.item(
+                            param.transformation.convert_standard_deviation(
+                                param.prior.sigma, param.initial_value
+                            )
+                        )
+                    )
             else:
                 all_have_sigma = False
         if not all_have_sigma:
             sigma0 = None
-
         return sigma0

     def initial_value(self) -> np.ndarray:
@@ -380,8 +410,15 @@ def initial_value(self) -> np.ndarray:

         for param in self.param.values():
             if param.initial_value is None:
-                initial_value = param.rvs(1)[0]
-                param.update(initial_value=initial_value)
+                if param.prior is not None:
+                    initial_value = param.rvs(1)[0]
+                    param.update(initial_value=initial_value)
+                else:
+                    warnings.warn(
+                        "Initial value or Prior are None, proceeding without initial value.",
+                        UserWarning,
+                        stacklevel=2,
+                    )
             initial_values.append(param.initial_value)

         return np.asarray(initial_values)
@@ -408,6 +445,30 @@ def true_value(self) -> np.ndarray:

         return np.asarray(true_values)

+    def get_transformations(self):
+        """
+        Get the transformations for each parameter.
+        """
+        transformations = []
+
+        for param in self.param.values():
+            transformations.append(param.transformation)
+
+        return transformations
+
+    def construct_transformation(self):
+        """
+        Create a ComposedTransformation object from the individual parameter transformations.
+        """
+        transformations = self.get_transformations()
+        if not transformations or all(t is None for t in transformations):
+            return None
+
+        valid_transformations = [
+            t if t is not None else IdentityTransformation() for t in transformations
+        ]
+        return ComposedTransformation(valid_transformations)
+
     def get_bounds_for_plotly(self):
         """
         Retrieve parameter bounds in the format expected by Plotly.
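A short sketch of the new `Parameters` helpers above (the parameter names here are placeholders): per-parameter transformations are composed into a single object, with `IdentityTransformation` filling in for parameters that have none.

```python
import pybop

parameters = pybop.Parameters(
    pybop.Parameter("a", initial_value=0.5),  # no transformation -> Identity
    pybop.Parameter(
        "b", initial_value=0.1, transformation=pybop.LogTransformation()
    ),
)

composed = parameters.construct_transformation()
assert composed.n_parameters == 2  # one slot per parameter
```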
+ """ + transformations = self.get_transformations() + if not transformations or all(t is None for t in transformations): + return None + + valid_transformations = [ + t if t is not None else IdentityTransformation() for t in transformations + ] + return ComposedTransformation(valid_transformations) + def get_bounds_for_plotly(self): """ Retrieve parameter bounds in the format expected by Plotly. diff --git a/pybop/transformation/base_transformation.py b/pybop/transformation/base_transformation.py new file mode 100644 index 00000000..c9905775 --- /dev/null +++ b/pybop/transformation/base_transformation.py @@ -0,0 +1,160 @@ +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import Union + +import numpy as np + + +class Transformation(ABC): + """ + Abstract base class for transformations between two parameter spaces: the model + parameter space and a search space. + + If `transform` is an instance of a `Transformation` class, you can apply the + transformation of a parameter vector from the model space `p` to the search + space `q` using `q = transform.to_search(p)` and the inverse using `p = transform.to_model(q)`. + + Based on pints.transformation method. + + References + ---------- + .. [1] Erik Jorgensen and Asger Roer Pedersen. "How to Obtain Those Nasty Standard Errors From Transformed Data." + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.9023 + .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. + """ + + # ---- To be implemented with Monte Carlo PR ------ # + # def convert_log_prior(self, log_prior): + # """Returns a transformed log-prior class.""" + # return TransformedLogPrior(log_prior, self) + + def convert_covariance_matrix(self, cov: np.ndarray, q: np.ndarray) -> np.ndarray: + """ + Converts a covariance matrix `covariance` from the model space to the search space + around a parameter vector `q` in the search space. + """ + jac_inv = np.linalg.pinv(self.jacobian(q)) + return jac_inv @ cov @ jac_inv.T + + def convert_standard_deviation( + self, std: Union[float, np.ndarray], q: np.ndarray + ) -> np.ndarray: + """ + Converts standard deviation `std`, either a scalar or a vector, from the model space + to the search space around a parameter vector `q` in the search space. + """ + if isinstance(q, (int, float)): + q = np.asarray([q]) + jac_inv = np.linalg.pinv(self.jacobian(q)) + cov = jac_inv @ jac_inv.T + return std * np.sqrt(np.diagonal(cov)) + + @abstractmethod + def jacobian(self, q: np.ndarray) -> np.ndarray: + """Returns the Jacobian matrix of the transformation at the parameter vector `q`.""" + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, Sequence[np.ndarray]]: + """ + Computes the Jacobian matrix and its partial derivatives at the parameter vector `q`. + + Returns a tuple `(jacobian, hessian)`. + """ + raise NotImplementedError("jacobian_S1 method must be implemented if used.") + + def log_jacobian_det(self, q: np.ndarray) -> float: + """ + Returns the logarithm of the absolute value of the determinant of the Jacobian matrix + at the parameter vector `q`. + """ + raise NotImplementedError( + "log_jacobian_det method must be implemented if used." + ) + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """ + Computes the logarithm of the absolute value of the determinant of the Jacobian, + and returns it along with its partial derivatives. + """ + raise NotImplementedError( + "log_jacobian_det_S1 method must be implemented if used." 
+ ) + + @property + def n_parameters(self): + return self._n_parameters + + def to_model(self, q: np.ndarray) -> np.ndarray: + """Transforms a parameter vector `q` from the search space to the model space.""" + return self._transform(q, method="to_model") + + def to_search(self, p: np.ndarray) -> np.ndarray: + """Transforms a parameter vector `p` from the model space to the search space.""" + return self._transform(p, method="to_search") + + @abstractmethod + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """ + Transforms a parameter vector `x` from the search space to the model space if `method` + is "to_model", or from the model space to the search space if `method` is "to_search". + """ + + def is_elementwise(self) -> bool: + """ + Returns `True` if the transformation is element-wise, meaning it can be applied + element-by-element independently. + """ + raise NotImplementedError("is_elementwise method must be implemented if used.") + + def _verify_input( + self, inputs: Union[float, int, list[float], np.ndarray, dict[str, float]] + ) -> np.ndarray: + """Set and validate the transformation parameter.""" + if isinstance(inputs, (float, int)): + return np.full(self._n_parameters, float(inputs)) + + if isinstance(inputs, dict): + inputs = list(inputs.values()) + + try: + input_array = np.asarray(inputs, dtype=float) + except (ValueError, TypeError) as e: + raise TypeError( + "Transform must be a float, int, list, numpy array, or dictionary" + ) from e + + if input_array.size != self._n_parameters: + raise ValueError(f"Transform must have {self._n_parameters} elements") + + return input_array + + +# ---- To be implemented with Monte Carlo PR ------ # +# class TransformedLogPDF(BaseCost): +# """Transformed log-PDF class.""" +# def __init__(self, log_pdf, transformation): +# self._log_pdf = log_pdf +# self._transformation = transformation + +# def __call__(self, q): +# p = self._transformation.to_model(q) +# log_pdf = self._log_pdf(p) + +# # Calculate the PDF using change of variable +# # Wikipedia: https://w.wiki/UsJ +# log_jacobian_det = self._transformation.log_jacobian_det(q) +# return log_pdf + log_jacobian_det + +# def _evaluateS1(self, x): +# p = self._transformation.to_model(x) +# log_pdf, log_pdf_derivatives = self._log_pdf._evaluateS1(p) +# log_jacobian_det, log_jacobian_det_derivatives = self._transformation.log_jacobian_det_S1(x) +# return log_pdf + log_jacobian_det, log_pdf_derivatives + log_jacobian_det_derivatives + +# class TransformedLogPrior: +# """Transformed log-prior class.""" +# def __init__(self, log_prior, transformation): +# self._log_prior = log_prior +# self._transformation = transformation + +# def __call__(self, q): +# return self diff --git a/pybop/transformation/transformations.py b/pybop/transformation/transformations.py new file mode 100644 index 00000000..782fe46b --- /dev/null +++ b/pybop/transformation/transformations.py @@ -0,0 +1,364 @@ +from typing import Union + +import numpy as np + +from pybop import Transformation + + +class IdentityTransformation(Transformation): + """ + This class implements a trivial transformation where the model parameter space + and the search space are identical. It extends the base Transformation class. + + The transformation is defined as: + - to_search: y = x + - to_model: x = y + + Key properties: + 1. Jacobian: Identity matrix + 2. Log determinant of Jacobian: Always 0 + 3. Elementwise: True (each output dimension depends only on the corresponding input dimension) + + Use cases: + 1. 
When no transformation is needed between spaces + 2. As a placeholder in composite transformations + 3. For testing and benchmarking other transformations + + Note: While this transformation doesn't change the parameters, it still provides + all the methods required by the Transformation interface, making it useful in + scenarios where a transformation object is expected but no actual transformation + is needed. + + Initially based on pints.IdentityTransformation method. + """ + + def __init__(self, n_parameters: int = 1): + self._n_parameters = n_parameters + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.eye(self._n_parameters) + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + n = self._n_parameters + return self.jacobian(q), np.zeros((n, n, n)) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return 0.0 + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), np.zeros(self._n_parameters) + + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + return np.asarray(x) + + +class ScaledTransformation(Transformation): + """ + This class implements a linear transformation between the model parameter space + and a search space, using a coefficient (scale factor) and an intercept (offset). + It extends the base Transformation class. + + The transformation is defined as: + - to_search: y = coefficient * (x + intercept) + - to_model: x = y / coefficient - intercept + + Where: + - x is in the model parameter space + - y is in the search space + - coefficient is the scaling factor + - intercept is the offset + + This transformation is useful for scaling and shifting parameters to a more + suitable range for optimisation algorithms. + + Based on pints.ScaledTransformation class. 
+ """ + + def __init__( + self, + coefficient: Union[list, float, np.ndarray], + intercept: Union[list, float, np.ndarray] = 0, + n_parameters: int = 1, + ): + self._n_parameters = n_parameters + self._intercept = self._verify_input(intercept) + self._coefficient = self._verify_input(coefficient) + self._inverse_coeff = np.reciprocal(self._coefficient) + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.diag(self._inverse_coeff) + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + n = self._n_parameters + return self.jacobian(q), np.zeros((n, n, n)) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return np.sum(np.log(np.abs(self._coefficient))) + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), np.zeros(self._n_parameters) + + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + x = self._verify_input(x) + if method == "to_model": + return x * self._inverse_coeff - self._intercept + elif method == "to_search": + return self._coefficient * (x + self._intercept) + else: + raise ValueError(f"Unknown method: {method}") + + +class LogTransformation(Transformation): + """ + This class implements a logarithmic transformation between the model parameter space + and a search space. It extends the base Transformation class. + + The transformation is defined as: + - to_search: y = log(x) + - to_model: x = exp(y) + + Where: + - x is in the model parameter space (strictly positive) + - y is in the search space (can be any real number) + + This transformation is particularly useful for: + 1. Parameters that are strictly positive and may span several orders of magnitude. + 2. Converting multiplicative processes to additive ones in the search space. + 3. Ensuring positivity constraints without explicit bounds in optimisation. + + Note: Care should be taken when using this transformation, as it can introduce + bias in the parameter estimates if not accounted for properly in the likelihood + or cost function. Simply, E[log(x)] <= log(E[x]) as per to Jensen's inequality. + For more information, see Jensen's inequality: + https://en.wikipedia.org/w/index.php?title=Jensen%27s_inequality&oldid=1212437916#Probabilistic_form + + Initially based on pints.LogTransformation class. 
+ """ + + def __init__(self, n_parameters: int = 1): + self._n_parameters = n_parameters + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return True + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """See :meth:`Transformation.jacobian()`.""" + return np.diag(1 / q) + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + return np.diag(1 / q), np.diag(-1 / q**2) + + def log_jacobian_det(self, q: np.ndarray) -> float: + """See :meth:`Transformation.log_jacobian_det()`.""" + return np.sum(-np.log(q)) + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """See :meth:`Transformation.log_jacobian_det_S1()`.""" + return self.log_jacobian_det(q), -1 / q + + def _transform(self, x: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + x = self._verify_input(x) + if method == "to_model": + return np.exp(x) + elif method == "to_search": + return np.log(x) + else: + raise ValueError(f"Unknown method: {method}") + + +class ComposedTransformation(Transformation): + """ + N-dimensional Transformation composed of one or more other N_i-dimensional + sub-transformations, where the sum of N_i equals N. + + This class allows for the composition of multiple transformations, each potentially + operating on a different number of dimensions. The total dimensionality of the + composed transformation is the sum of the dimensionalities of its components. + + The dimensionality of the individual transformations does not have to be + the same, i.e., N_i != N_j is allowed. + + Extends pybop.Transformation. Initially based on pints.ComposedTransformation class. + """ + + def __init__(self, transformations: list[Transformation]): + if not transformations: + raise ValueError("Must have at least one sub-transformation.") + self._transformations = [] + self._n_parameters = 0 + self._is_elementwise = True + for transformation in transformations: + self._append_transformation(transformation) + + def _append_transformation(self, transformation: Transformation): + """ + Append a transformation to the internal list of transformations. + + Parameters + ---------- + transformation : Transformation + Transformation to append. + + Raises + ------ + ValueError + If the appended object is not a Transformation. + """ + if not isinstance(transformation, Transformation): + raise TypeError("The appended object must be a Transformation.") + self._transformations.append(transformation) + self._n_parameters += transformation.n_parameters + self._is_elementwise = self._is_elementwise and transformation.is_elementwise() + + def append(self, transformation: Transformation): + """ + Append a new transformation to the existing composition. + + Parameters + ---------- + transformation : Transformation + The transformation to append. + """ + self._append_transformation(transformation) + + def is_elementwise(self) -> bool: + """See :meth:`Transformation.is_elementwise()`.""" + return self._is_elementwise + + def jacobian(self, q: np.ndarray) -> np.ndarray: + """ + Compute the elementwise Jacobian of the composed transformation. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + np.ndarray + Diagonal matrix representing the elementwise Jacobian. 
+ """ + q = self._verify_input(q) + diag = np.zeros(self._n_parameters) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + diag[lo:hi] = np.diagonal(transformation.jacobian(q[lo:hi])) + lo = hi + + return np.diag(diag) + + def jacobian_S1(self, q: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """See :meth:`Transformation.jacobian_S1()`.""" + q = self._verify_input(q) + output_S1 = np.zeros( + (self._n_parameters, self._n_parameters, self._n_parameters) + ) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + _, jac_S1 = transformation.jacobian_S1(q[lo:hi]) + for i, jac_S1_i in enumerate(jac_S1): + output_S1[lo + i, lo:hi, lo:hi] = jac_S1_i + lo = hi + + return self.jacobian(q), output_S1 + + def log_jacobian_det(self, q: np.ndarray) -> float: + """ + Compute the elementwise logarithm of the determinant of the Jacobian. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + float + Sum of log determinants of individual transformations. + """ + q = self._verify_input(q) + return sum( + transformation.log_jacobian_det(q[lo : lo + transformation.n_parameters]) + for lo, transformation in self._iter_transformations() + ) + + def log_jacobian_det_S1(self, q: np.ndarray) -> tuple[float, np.ndarray]: + """ + Compute the elementwise logarithm of the determinant of the Jacobian and its first-order sensitivities. + + Parameters + ---------- + q : np.ndarray + Input array. + + Returns + ------- + Tuple[float, np.ndarray] + Tuple of sum of log determinants and concatenated first-order sensitivities. + """ + q = self._verify_input(q) + output = 0.0 + output_S1 = np.zeros(self._n_parameters) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + j, j_S1 = transformation.log_jacobian_det_S1(q[lo:hi]) + output += j + output_S1[lo:hi] = j_S1 + lo = hi + + return output, output_S1 + + def _transform(self, data: np.ndarray, method: str) -> np.ndarray: + """See :meth:`Transformation._transform`.""" + data = self._verify_input(data) + output = np.zeros_like(data) + lo = 0 + + for transformation in self._transformations: + hi = lo + transformation.n_parameters + output[lo:hi] = getattr(transformation, method)(data[lo:hi]) + lo = hi + + return output + + def _iter_transformations(self): + """ + Iterate over the transformations in the composition. + + Yields + ------ + Tuple[int, Transformation] + Tuple of starting index and transformation object for each sub-transformation. 
+ """ + lo = 0 + for transformation in self._transformations: + yield lo, transformation + lo += transformation.n_parameters diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py index 47782a03..34a6e8c6 100644 --- a/tests/integration/test_optimisation_options.py +++ b/tests/integration/test_optimisation_options.py @@ -104,6 +104,8 @@ def test_optimisation_f_guessed(self, f_guessed, spm_costs): assert initial_cost > final_cost else: assert initial_cost < final_cost + else: + raise ValueError("Initial value is the same as the ground truth value.") np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x, init_soc): diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py index 5e9d6b00..b3188e19 100644 --- a/tests/integration/test_spm_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -196,7 +196,6 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): # Test each optimiser optim = multi_optimiser( cost=spm_two_signal_cost, - sigma0=0.03, max_iterations=250, ) diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py index 98dde5cb..fbb9f6cb 100644 --- a/tests/integration/test_thevenin_parameterisation.py +++ b/tests/integration/test_thevenin_parameterisation.py @@ -90,6 +90,8 @@ def test_optimisers_on_simple_model(self, optimiser, cost): assert initial_cost > final_cost else: assert initial_cost < final_cost + else: + raise ValueError("Initial value is the same as the ground truth value.") np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) def get_data(self, model, parameters, x): diff --git a/tests/integration/test_transformation.py b/tests/integration/test_transformation.py new file mode 100644 index 00000000..4ca5a0c1 --- /dev/null +++ b/tests/integration/test_transformation.py @@ -0,0 +1,103 @@ +import numpy as np +import pytest + +import pybop + + +class TestTransformation: + """ + A class for integration tests of the transformation methods. 
+ """ + + @pytest.fixture(autouse=True) + def setup(self): + self.ground_truth = np.array([0.5, 0.2]) + np.random.normal( + loc=0.0, scale=0.05, size=2 + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet.pybamm("Chen2020") + return pybop.lithium_ion.SPMe(parameter_set=parameter_set) + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Positive electrode active material volume fraction", + prior=pybop.Uniform(0.4, 0.7), + bounds=[0.375, 0.725], + transformation=pybop.LogTransformation(), + ), + pybop.Parameter( + "Positive electrode conductivity [S.m-1]", + prior=pybop.Uniform(0.05, 0.45), + bounds=[0.04, 0.5], + transformation=pybop.LogTransformation(), + ), + ) + + @pytest.fixture(params=[0.4, 0.7]) + def init_soc(self, request): + return request.param + + def noise(self, sigma, values): + return np.random.normal(0, sigma, values) + + @pytest.fixture + def cost(self, model, parameters, init_soc): + # Form dataset + solution = self.get_data(model, parameters, self.ground_truth, init_soc) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data + + self.noise(0.002, len(solution["Time [s]"].data)), + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc) + return pybop.RootMeanSquaredError(problem) + + @pytest.mark.parametrize( + "optimiser", + [ + pybop.AdamW, + pybop.CMAES, + ], + ) + @pytest.mark.integration + def test_spm_optimisers(self, optimiser, cost): + x0 = cost.parameters.initial_value() + optim = optimiser( + cost=cost, + max_unchanged_iterations=35, + max_iterations=125, + ) + + initial_cost = optim.cost(x0) + x, final_cost = optim.run() + + # Assertions + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost + np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2) + else: + raise ValueError("Initial value is the same as the ground truth value.") + + def get_data(self, model, parameters, x, init_soc): + model.parameters = parameters + experiment = pybop.Experiment( + [ + ( + "Discharge at 1C for 3 minutes (4 second period)", + "Charge at 1C for 3 minutes (4 second period)", + ), + ] + ) + sim = model.predict( + init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict(x) + ) + return sim diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py index aa68cc0e..56ae1e44 100644 --- a/tests/unit/test_likelihoods.py +++ b/tests/unit/test_likelihoods.py @@ -85,7 +85,7 @@ def test_base_likelihood_call_raises_not_implemented_error( ): likelihood = pybop.BaseLikelihood(one_signal_problem) with pytest.raises(NotImplementedError): - likelihood(np.array([0.5, 0.5])) + likelihood(np.array([0.5])) @pytest.mark.unit def test_likelihood_check_sigma0(self, one_signal_problem): @@ -128,8 +128,8 @@ def test_gaussian_log_likelihood_known_sigma(self, problem_name, request): @pytest.mark.unit def test_gaussian_log_likelihood(self, one_signal_problem): likelihood = pybop.GaussianLogLikelihood(one_signal_problem) - result = likelihood(np.array([0.5, 0.5])) - grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5])) + result = likelihood(np.array([0.8, 0.2])) + grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.8, 0.2])) assert isinstance(result, float) np.testing.assert_allclose(result, grad_result, atol=1e-5) assert np.all(grad_likelihood <= 0) diff 
--git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 90c43622..e48cda8e 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -1,3 +1,4 @@ +import numpy as np import pytest import pybop @@ -212,3 +213,19 @@ def test_get_sigma(self, parameter): parameter.prior = None assert params.get_sigma0() is None + + @pytest.mark.unit + def test_initial_values_without_attributes(self): + # Test without initial values + parameter = pybop.Parameters( + pybop.Parameter( + "Negative electrode conductivity [S.m-1]", + ) + ) + with pytest.warns( + UserWarning, + match="Initial value or Prior are None, proceeding without initial value.", + ): + sample = parameter.initial_value() + + np.testing.assert_equal(sample, np.array([None])) diff --git a/tests/unit/test_transformations.py b/tests/unit/test_transformations.py new file mode 100644 index 00000000..9a86d1f0 --- /dev/null +++ b/tests/unit/test_transformations.py @@ -0,0 +1,231 @@ +import inspect + +import numpy as np +import pytest + +import pybop + + +class TestTransformation: + """ + A class to test the transformations. + """ + + @pytest.fixture + def parameters(self): + return pybop.Parameters( + pybop.Parameter( + "Identity", + transformation=pybop.IdentityTransformation(), + ), + pybop.Parameter( + "Scaled", + transformation=pybop.ScaledTransformation(coefficient=2.0, intercept=1), + ), + pybop.Parameter( + "Log", + transformation=pybop.LogTransformation(), + ), + ) + + @pytest.mark.unit + def test_identity_transformation(self, parameters): + q = np.asarray([5.0]) + transformation = parameters["Identity"].transformation + assert np.array_equal(transformation.to_model(q), q) + assert np.array_equal(transformation.to_search(q), q) + assert transformation.log_jacobian_det(q) == 0.0 + assert transformation.log_jacobian_det_S1(q) == (0.0, np.zeros(1)) + assert transformation.n_parameters == 1 + assert transformation.is_elementwise() + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.eye(1)) + assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + + # Test covariance transformation + cov = np.array([[0.5]]) + q = np.array([5.0]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov) + + @pytest.mark.unit + def test_scaled_transformation(self, parameters): + q = np.asarray([2.5]) + transformation = parameters["Scaled"].transformation + p = transformation.to_model(q) + assert np.allclose(p, (q / 2.0) - 1.0) + assert transformation.n_parameters == 1 + assert transformation.is_elementwise() + + q_transformed = transformation.to_search(p) + assert np.allclose(q_transformed, q) + assert np.allclose( + transformation.log_jacobian_det(q), np.sum(np.log(np.abs(2.0))) + ) + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == np.sum(np.log(np.abs(2.0))) + assert log_jac_det_S1[1] == np.zeros(1) + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.diag([0.5])) + assert np.array_equal(jac_S1, np.zeros((1, 1, 1))) + + # Test covariance transformation + cov = np.array([[0.5]]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov * transformation._coefficient**2) + + # Test incorrect transform + with pytest.raises(ValueError, match="Unknown method:"): + transformation._transform(q, "bad-string") + + @pytest.mark.unit + def test_log_transformation(self, parameters): + q = np.asarray([10]) + transformation = 
parameters["Log"].transformation + p = transformation.to_model(q) + assert np.allclose(p, np.exp(q)) + assert transformation.n_parameters == 1 + + q_transformed = transformation.to_search(p) + assert np.allclose(q_transformed, q) + assert np.allclose(transformation.log_jacobian_det(q), -np.sum(np.log(q))) + + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == -np.sum(np.log(q)) + assert log_jac_det_S1[1] == -1 / q + + jac, jac_S1 = transformation.jacobian_S1(q) + assert np.array_equal(jac, np.diag(1 / q)) + assert np.array_equal(jac_S1, np.diag(-1 / q**2)) + + # Test covariance transformation + cov = np.array([[0.5]]) + cov_transformed = transformation.convert_covariance_matrix(cov, q) + assert np.array_equal(cov_transformed, cov * q**2) + + # Test incorrect transform + with pytest.raises(ValueError, match="Unknown method:"): + transformation._transform(q, "bad-string") + + @pytest.mark.unit + def test_composed_transformation(self, parameters): + # Test elementwise transformations + transformation = pybop.ComposedTransformation( + [parameters["Identity"].transformation, parameters["Scaled"].transformation] + ) + # Test appending transformations + transformation.append(parameters["Log"].transformation) + assert transformation.is_elementwise() is True + + q = np.asarray([5.0, 2.5, 10]) + p = transformation.to_model(q) + np.testing.assert_allclose( + p, np.asarray([5.0, ((q[1] / 2.0) - 1.0), np.exp(q[2])]) + ) + jac = transformation.jacobian(q) + jac_S1 = transformation.jacobian_S1(q) + np.testing.assert_allclose( + jac, np.asarray([[1, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.1]]) + ) + np.testing.assert_allclose(jac_S1[0], jac) + assert jac_S1[1].shape == (3, 3, 3) + assert jac_S1[1][2, 2, 2] == -0.01 + np.testing.assert_allclose(jac_S1[1][0, :, :], np.zeros((3, 3))) + np.testing.assert_allclose(jac_S1[1][1, :, :], np.zeros((3, 3))) + + correct_output = np.sum(np.log(np.abs(2.0))) + np.sum(-np.log(10)) + log_jac_det = transformation.log_jacobian_det(q) + assert log_jac_det == correct_output + + log_jac_det_S1 = transformation.log_jacobian_det_S1(q) + assert log_jac_det_S1[0] == correct_output + np.testing.assert_allclose(log_jac_det_S1[1], np.asarray([0.0, 0.0, -0.1])) + + # Test composed with no transformations + with pytest.raises( + ValueError, match="Must have at least one sub-transformation." + ): + pybop.ComposedTransformation([]) + + # Test incorrect append object + with pytest.raises( + TypeError, match="The appended object must be a Transformation." + ): + pybop.ComposedTransformation( + [parameters["Identity"].transformation, "string"] + ) + + @pytest.mark.unit + def test_verify_input(self, parameters): + q = np.asarray([5.0]) + q_dict = {"Identity": q[0]} + transformation = parameters["Scaled"].transformation + assert transformation._verify_input(q) == q + assert transformation._verify_input(q.tolist()) == q + assert transformation._verify_input(q_dict) == q + + with pytest.raises( + TypeError, match="Transform must be a float, int, list, numpy array," + ): + transformation._verify_input("string") + + with pytest.raises(ValueError, match="Transform must have"): + transformation._verify_input(np.array([1.0, 2.0, 3.0])) + + +class TestBaseTransformation: + """ + A class to test the abstract base transformation class. 
+ """ + + @pytest.fixture + def ConcreteTransformation(self): + class ConcreteTransformation(pybop.Transformation): + def jacobian(self, q): + pass + + def _transform(self, q, method): + pass + + return ConcreteTransformation() + + @pytest.mark.unit + def test_abstract_base_transformation(self): + with pytest.raises(TypeError): + pybop.Transformation() + + @pytest.mark.unit + def test_abstract_methods(self): + abstract_methods = ["jacobian", "_transform"] + for method in abstract_methods: + assert hasattr(pybop.Transformation, method) + assert getattr(pybop.Transformation, method).__isabstractmethod__ + + @pytest.mark.unit + def test_concrete_methods(self): + concrete_methods = [ + "convert_covariance_matrix", + "convert_standard_deviation", + "log_jacobian_det", + "to_model", + "to_search", + ] + for method in concrete_methods: + assert hasattr(pybop.Transformation, method) + assert not inspect.isabstract(getattr(pybop.Transformation, method)) + + @pytest.mark.unit + def test_not_implemented_methods(self, ConcreteTransformation): + not_implemented_methods = [ + "jacobian_S1", + "log_jacobian_det_S1", + ] + for method_name in not_implemented_methods: + with pytest.raises(NotImplementedError): + method = getattr(ConcreteTransformation, method_name) + method(np.array([1.0])) + + with pytest.raises(NotImplementedError): + ConcreteTransformation.is_elementwise()