diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 1fcb4674..be99a369 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -139,7 +139,7 @@ def _evaluate(self, inputs: Inputs, grad=None):
         Returns:
             float: The log-likelihood value, or -inf if the standard deviations are received as non-positive.
         """
-        sigma = np.asarray([0.002])  # TEMPORARY WORKAROUND
+        sigma = np.asarray([0.002])  # TEMPORARY WORKAROUND (replace in #338)

         if np.any(sigma <= 0):
             return -np.inf
@@ -171,7 +171,7 @@ def _evaluateS1(self, inputs: Inputs, grad=None):
         """
         Calls the problem.evaluateS1 method and calculates the log-likelihood
         """
-        sigma = np.asarray([0.002])  # TEMPORARY WORKAROUND
+        sigma = np.asarray([0.002])  # TEMPORARY WORKAROUND (replace in #338)

         if np.any(sigma <= 0):
             return -np.float64(np.inf), -self._dl * np.ones(self.n_parameters)
diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py
index ba6de5db..88b6a36d 100644
--- a/pybop/costs/fitting_costs.py
+++ b/pybop/costs/fitting_costs.py
@@ -331,7 +331,8 @@ def _evaluate(self, inputs: Inputs, grad=None):
         """
         log_likelihood = self.likelihood._evaluate(inputs)
         log_prior = sum(
-            self.parameters[key].prior.logpdf(inputs[key]) for key in inputs.keys()
+            self.parameters[key].prior.logpdf(value) for key, value in inputs.items()
+        )

         posterior = log_likelihood + log_prior
diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py
index b1c4314a..79c56263 100644
--- a/pybop/models/base_model.py
+++ b/pybop/models/base_model.py
@@ -104,9 +104,7 @@ def build(
             The initial state of charge to be used in simulations.
         """
         self.dataset = dataset
-        if parameters is None:
-            self.parameters = Parameters()
-        else:
+        if parameters is not None:
             self.parameters = parameters
             self.classify_and_update_parameters(self.parameters)
@@ -466,7 +464,7 @@ def predict(

         Parameters
         ----------
-        inputs : Inputse, optional
+        inputs : Inputs, optional
             Input parameters for the simulation. Defaults to None, indicating
             that the default parameters should be used.
         t_eval : array-like, optional
diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py
index 9f601629..ba433063 100644
--- a/pybop/optimisers/base_optimiser.py
+++ b/pybop/optimisers/base_optimiser.py
@@ -112,7 +112,7 @@ def set_base_options(self):
        """
        Update the base optimiser options and remove them from the options dictionary.
        """
-        # Set initial values
+        # Set the initial values; if x0 is None, the initial values are left unmodified
        self.parameters.update(initial_values=self.unset_options.pop("x0", None))
        self.x0 = self.parameters.initial_value()
diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py
index 5ae07657..00021dd8 100644
--- a/pybop/parameters/parameter.py
+++ b/pybop/parameters/parameter.py
@@ -426,7 +426,7 @@ def as_dict(self, values=None) -> Dict:
             values = self.true_value()
         return {key: values[i] for i, key in enumerate(self.param.keys())}

-    def verify(self, inputs=None):
+    def verify(self, inputs: Union[Inputs, None] = None):
         """
         Verify that the inputs are an Inputs dictionary or numeric values
         which can be used to construct an Inputs dictionary
diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py
index 8dcb1110..44142a68 100644
--- a/pybop/problems/base_problem.py
+++ b/pybop/problems/base_problem.py
@@ -77,7 +77,7 @@ def evaluate(self, inputs: Inputs):
         Parameters
         ----------
         inputs : Inputs
-            Parameters for evaluation of the mmodel.
+            Parameters for evaluation of the model.
        Raises
        ------
@@ -94,7 +94,7 @@ def evaluateS1(self, inputs: Inputs):
         Parameters
         ----------
         inputs : Inputs
-            Parameters for evaluation of the mmodel.
+            Parameters for evaluation of the model.

         Raises
         ------
diff --git a/tests/integration/test_optimisation_options.py b/tests/integration/test_optimisation_options.py
index 4436c2a1..33143a78 100644
--- a/tests/integration/test_optimisation_options.py
+++ b/tests/integration/test_optimisation_options.py
@@ -118,6 +118,6 @@ def get_data(self, model, parameters, x, init_soc):
             * 2
         )
         sim = model.predict(
-            init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict(x)
+            init_soc=init_soc, experiment=experiment, inputs=x
         )
         return sim
diff --git a/tests/integration/test_spm_parameterisations.py b/tests/integration/test_spm_parameterisations.py
index f070a09e..920e1b2a 100644
--- a/tests/integration/test_spm_parameterisations.py
+++ b/tests/integration/test_spm_parameterisations.py
@@ -246,6 +246,6 @@ def get_data(self, model, parameters, x, init_soc):
             * 2
         )
         sim = model.predict(
-            init_soc=init_soc, experiment=experiment, inputs=parameters.as_dict(x)
+            init_soc=init_soc, experiment=experiment, inputs=x
         )
         return sim
diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py
index 185ab295..45df6ba4 100644
--- a/tests/integration/test_thevenin_parameterisation.py
+++ b/tests/integration/test_thevenin_parameterisation.py
@@ -102,5 +102,5 @@ def get_data(self, model, parameters, x):
                 ),
             ]
         )
-        sim = model.predict(experiment=experiment, inputs=parameters.as_dict(x))
+        sim = model.predict(experiment=experiment, inputs=x)
         return sim
diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py
index 310d149b..b99aa5d0 100644
--- a/tests/unit/test_likelihoods.py
+++ b/tests/unit/test_likelihoods.py
@@ -131,7 +131,7 @@ def test_gaussian_log_likelihood(self, one_signal_problem):
         grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5]))
         assert isinstance(result, float)
         np.testing.assert_allclose(result, grad_result, atol=1e-5)
-        assert grad_likelihood[0] <= 0  # TEMPORARY WORKAROUND
+        assert grad_likelihood[0] <= 0  # TEMPORARY WORKAROUND (Remove in #338)

     @pytest.mark.unit
     def test_gaussian_log_likelihood_returns_negative_inf(self, one_signal_problem):
diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py
index d8fdf4fa..6809aec8 100644
--- a/tests/unit/test_models.py
+++ b/tests/unit/test_models.py
@@ -357,8 +357,8 @@ def test_non_converged_solution(self):
         )
         problem = pybop.FittingProblem(model, parameters=parameters, dataset=dataset)

-        res = problem.evaluate(parameters.as_dict([-0.2, -0.2]))
-        _, res_grad = problem.evaluateS1(parameters.as_dict([-0.2, -0.2]))
+        res = problem.evaluate([-0.2, -0.2])
+        _, res_grad = problem.evaluateS1([-0.2, -0.2])

         for key in problem.signal:
             assert np.isinf(res.get(key, [])).any()
diff --git a/tests/unit/test_problem.py b/tests/unit/test_problem.py
index 6fcb2203..1664e6ce 100644
--- a/tests/unit/test_problem.py
+++ b/tests/unit/test_problem.py
@@ -173,8 +173,8 @@ def test_design_problem(self, parameters, experiment, model):
         )  # building postponed with input experiment

         # Test model.predict
-        model.predict(inputs=parameters.as_dict([1e-5, 1e-5]), experiment=experiment)
-        model.predict(inputs=parameters.as_dict([3e-5, 3e-5]), experiment=experiment)
+        model.predict(inputs=[1e-5, 1e-5], experiment=experiment)
+        model.predict(inputs=[3e-5, 3e-5], experiment=experiment)

     @pytest.mark.unit
     def test_problem_construct_with_model_predict(
@@ -183,7 +183,7 @@
         # Construct model and predict
         model.parameters = parameters
         out = model.predict(
-            inputs=parameters.as_dict([1e-5, 1e-5]), t_eval=np.linspace(0, 10, 100)
+            inputs=[1e-5, 1e-5], t_eval=np.linspace(0, 10, 100)
         )

         problem = pybop.FittingProblem(
@@ -191,7 +191,7 @@
         )

         # Test problem evaluate
-        problem_output = problem.evaluate(parameters.as_dict([2e-5, 2e-5]))
+        problem_output = problem.evaluate([2e-5, 2e-5])
         assert problem._model._built_model is not None

         with pytest.raises(AssertionError):
diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py
index edefd0ad..329ac47a 100644
--- a/tests/unit/test_standalone.py
+++ b/tests/unit/test_standalone.py
@@ -18,14 +18,14 @@ def test_standalone_optimiser(self):
         assert optim.name() == "StandaloneOptimiser"

         x, final_cost = optim.run()
-        assert optim.cost(optim.parameters.initial_value()) > final_cost
+        assert optim.cost(optim.x0) > final_cost
         np.testing.assert_allclose(x, [2, 4], atol=1e-2)

         # Test with bounds
         optim = StandaloneOptimiser(bounds=dict(upper=[5, 6], lower=[1, 2]))
         x, final_cost = optim.run()
-        assert optim.cost(optim.parameters.initial_value()) > final_cost
+        assert optim.cost(optim.x0) > final_cost
         np.testing.assert_allclose(x, [2, 4], atol=1e-2)

     @pytest.mark.unit
@@ -35,7 +35,7 @@ def test_optimisation_on_standalone_cost(self):
         optim = pybop.SciPyDifferentialEvolution(cost=cost)

         x, final_cost = optim.run()
-        initial_cost = optim.cost(optim.parameters.initial_value())
+        initial_cost = optim.cost(optim.x0)
         assert initial_cost > final_cost
         np.testing.assert_allclose(final_cost, 42, atol=1e-1)