diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4c43cbee0..7bfc4528f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
 ## Features
 
+- [#218](https://github.com/pybop-team/PyBOP/pull/218) - Adds a likelihood base class, `GaussianLogLikelihoodKnownSigma`, `GaussianLogLikelihood`, and the `ProbabilityBased` cost function, as well as a maximum likelihood estimation (MLE) example.
 - [#185](https://github.com/pybop-team/PyBOP/pull/185) - Adds a pull request template, additional nox sessions `quick` for standard tests + docs, `pre-commit` for pre-commit, `test` to run all standard tests, `doctest` for docs.
 - [#215](https://github.com/pybop-team/PyBOP/pull/215) - Adds `release_workflow.md` and updates `release_action.yaml`
 - [#204](https://github.com/pybop-team/PyBOP/pull/204) - Splits integration, unit, examples, plots tests, update workflows. Adds pytest `--examples`, `--integration`, `--plots` args. Adds tests for coverage after removal of examples. Adds examples and integrations nox sessions. Adds `pybop.RMSE._evaluateS1()` method
diff --git a/examples/scripts/spm_IRPropMin.py b/examples/scripts/spm_IRPropMin.py
index 107d96200..1f5208ce7 100644
--- a/examples/scripts/spm_IRPropMin.py
+++ b/examples/scripts/spm_IRPropMin.py
@@ -17,6 +17,7 @@
     ),
 ]
 
+# Generate data
 sigma = 0.001
 t_eval = np.arange(0, 900, 2)
 values = model.predict(t_eval=t_eval)
diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py
new file mode 100644
index 000000000..7aa4254a7
--- /dev/null
+++ b/examples/scripts/spm_MLE.py
@@ -0,0 +1,70 @@
+import pybop
+import numpy as np
+
+# Define model
+parameter_set = pybop.ParameterSet.pybamm("Chen2020")
+model = pybop.lithium_ion.SPM(parameter_set=parameter_set)
+
+# Fitting parameters
+parameters = [
+    pybop.Parameter(
+        "Negative electrode active material volume fraction",
+        prior=pybop.Gaussian(0.6, 0.05),
+        bounds=[0.5, 0.8],
+    ),
+    pybop.Parameter(
+        "Positive electrode active material volume fraction",
+        prior=pybop.Gaussian(0.48, 0.05),
+        bounds=[0.4, 0.7],
+    ),
+]
+
+# Set initial parameter values
+parameter_set.update(
+    {
+        "Negative electrode active material volume fraction": 0.63,
+        "Positive electrode active material volume fraction": 0.51,
+    }
+)
+# Generate data
+sigma = 0.005
+t_eval = np.arange(0, 900, 2)
+values = model.predict(t_eval=t_eval)
+corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))
+
+# Form dataset
+dataset = pybop.Dataset(
+    {
+        "Time [s]": t_eval,
+        "Current function [A]": values["Current [A]"].data,
+        "Voltage [V]": corrupt_values,
+    }
+)
+
+# Generate problem, cost function, and optimisation class
+problem = pybop.FittingProblem(model, parameters, dataset)
+likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=[0.03, 0.03])
+optim = pybop.Optimisation(likelihood, optimiser=pybop.CMAES)
+optim.set_max_unchanged_iterations(20)
+optim.set_min_iterations(20)
+optim.set_max_iterations(100)
+
+# Run the optimisation
+x, final_cost = optim.run()
+print("Estimated parameters:", x)
+
+# Plot the timeseries output
+pybop.quick_plot(x[0:2], likelihood, title="Optimised Comparison")
+
+# Plot convergence
+pybop.plot_convergence(optim)
+
+# Plot the parameter traces
+pybop.plot_parameters(optim)
+
+# Plot the cost landscape
+pybop.plot_cost2d(likelihood, steps=15)
+
+# Plot the cost landscape with optimisation path and updated bounds
+bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
+pybop.plot_cost2d(likelihood, optim=optim, bounds=bounds, steps=15)
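As an aside (not part of the diff), the example above could also use the unknown-sigma likelihood introduced later in this PR. A minimal sketch, reusing the `problem` defined in spm_MLE.py; `likelihood_unknown` and the trailing 0.005 sigma guess are illustrative, and the last `n_outputs` entries of the parameter vector are interpreted as the Gaussian standard deviations:

# Sketch only: evaluate the unknown-sigma likelihood at a chosen point.
likelihood_unknown = pybop.GaussianLogLikelihood(problem)
# Single-output problem, so x is [theta_1, theta_2, sigma]
log_l = likelihood_unknown(np.array([0.63, 0.51, 0.005]))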
diff --git a/examples/standalone/cost.py b/examples/standalone/cost.py
index 9836a7e7d..d632d3f1d 100644
--- a/examples/standalone/cost.py
+++ b/examples/standalone/cost.py
@@ -18,7 +18,7 @@ class StandaloneCost(pybop.BaseCost):
         BaseCost interface.
     x0 : array-like
         The initial guess for the optimization problem, set to [4.2].
-    n_parameters : int
+    _n_parameters : int
         The number of parameters in the model, which is 1 in this case.
     bounds : dict
         A dictionary containing the lower and upper bounds for the parameter,
@@ -40,7 +40,7 @@ def __init__(self, problem=None):
         super().__init__(problem)
 
         self.x0 = np.array([4.2])
-        self.n_parameters = len(self.x0)
+        self._n_parameters = len(self.x0)
 
         self.bounds = dict(
             lower=[-1],
diff --git a/pybop/__init__.py b/pybop/__init__.py
index b623add9e..236c15012 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -23,6 +23,11 @@
 # Absolute path to the pybop repo
 script_path = path.dirname(__file__)
 
+#
+# Problem class
+#
+from ._problem import BaseProblem, FittingProblem, DesignProblem
+
 #
 # Cost function class
 #
@@ -37,6 +42,11 @@
     GravimetricEnergyDensity,
     VolumetricEnergyDensity,
 )
+from .costs._likelihoods import (
+    BaseLikelihood,
+    GaussianLogLikelihood,
+    GaussianLogLikelihoodKnownSigma,
+)
 
 #
 # Dataset class
@@ -84,10 +94,6 @@
 from .parameters.parameter_set import ParameterSet
 from .parameters.priors import Gaussian, Uniform, Exponential
 
-#
-# Problem class
-#
-from ._problem import FittingProblem, DesignProblem
 
 #
 # Observer classes
diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py
index 7ce236302..3eb839ef8 100644
--- a/pybop/_optimisation.py
+++ b/pybop/_optimisation.py
@@ -29,7 +29,7 @@ class Optimisation:
         Initial parameter values for the optimization.
     bounds : dict
         Dictionary containing the parameter bounds with keys 'lower' and 'upper'.
-    n_parameters : int
+    _n_parameters : int
         Number of parameters in the optimization problem.
     sigma0 : float or sequence
         Initial step size or standard deviation for the optimiser.
@@ -40,6 +40,7 @@ class Optimisation:
     def __init__(
         self,
         cost,
+        x0=None,
         optimiser=None,
         sigma0=None,
         verbose=False,
@@ -47,12 +48,12 @@ def __init__(
         allow_infeasible_solutions=True,
     ):
         self.cost = cost
+        self.x0 = x0 or cost.x0
         self.optimiser = optimiser
         self.verbose = verbose
-        self.x0 = cost.x0
         self.bounds = cost.bounds
         self.sigma0 = sigma0 or cost.sigma0
-        self.n_parameters = cost.n_parameters
+        self._n_parameters = cost._n_parameters
         self.physical_viability = physical_viability
        self.allow_infeasible_solutions = allow_infeasible_solutions
         self.log = []
@@ -74,12 +75,10 @@ def __init__(
         self._transformation = None
 
         # Check if minimising or maximising
-        self._minimising = not isinstance(cost, pints.LogPDF)
-        if self._minimising:
-            self._function = self.cost
-        else:
-            self._function = pints.ProbabilityBasedError(cost)
-        del cost
+        if isinstance(cost, pybop.BaseLikelihood):
+            self.cost._minimising = False
+        self._minimising = self.cost._minimising
+        self._function = self.cost
 
         # Construct Optimiser
         self.pints = True
@@ -122,6 +121,10 @@ def __init__(
         self._max_iterations = None
         self.set_max_iterations()
 
+        # Minimum iterations
+        self._min_iterations = None
+        self.set_min_iterations()
+
         # Maximum unchanged iterations
         self._unchanged_threshold = 1  # smallest significant f change
         self._unchanged_max_iterations = None
@@ -289,6 +292,7 @@ def _run_pints(self):
                 halt = (
                     self._unchanged_max_iterations is not None
                     and unchanged_iterations >= self._unchanged_max_iterations
+                    and iteration >= self._min_iterations
                 )
                 if running and halt:
                     running = False
@@ -448,6 +452,22 @@ def set_max_iterations(self, iterations=1000):
                 raise ValueError("Maximum number of iterations cannot be negative.")
         self._max_iterations = iterations
 
+    def set_min_iterations(self, iterations=2):
+        """
+        Set the minimum number of iterations as a stopping criterion.
+
+        Parameters
+        ----------
+        iterations : int, optional
+            The minimum number of iterations to run (default is 2).
+            Set to `None` to remove this stopping criterion.
+        """
+        if iterations is not None:
+            iterations = int(iterations)
+            if iterations < 0:
+                raise ValueError("Minimum number of iterations cannot be negative.")
+        self._min_iterations = iterations
+
     def set_max_unchanged_iterations(self, iterations=5, threshold=1e-5):
         """
         Set the maximum number of iterations without significant change as a stopping criterion.
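A short sketch (not part of the diff) of how the new minimum-iteration guard interacts with the unchanged-iterations criterion in `_run_pints`; `cost` is assumed to be any pybop cost or likelihood built as in the examples above:

# Sketch only: halting now requires both criteria to be satisfied.
optim = pybop.Optimisation(cost, optimiser=pybop.GradientDescent)
optim.set_max_unchanged_iterations(1)  # on its own this would permit a very early halt
optim.set_min_iterations(10)           # halting is deferred until at least iteration 10
x, final_cost = optim.run()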
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
new file mode 100644
index 000000000..031344933
--- /dev/null
+++ b/pybop/costs/_likelihoods.py
@@ -0,0 +1,164 @@
+import numpy as np
+from pybop.costs.base_cost import BaseCost
+
+
+class BaseLikelihood(BaseCost):
+    """
+    Base class for likelihoods
+    """
+
+    def __init__(self, problem, sigma=None):
+        super(BaseLikelihood, self).__init__(problem, sigma)
+        self._n_times = problem.n_time_data
+
+    def set_sigma(self, sigma):
+        """
+        Setter for sigma parameter
+        """
+
+        if not isinstance(sigma, np.ndarray):
+            try:
+                sigma = np.array(sigma)
+            except Exception:
+                raise ValueError("Sigma must be a numpy array")
+
+        if np.any(sigma <= 0):
+            raise ValueError("Sigma must be positive")
+        else:
+            self.sigma0 = sigma
+
+    def get_sigma(self):
+        """
+        Getter for sigma parameter
+        """
+        return self.sigma0
+
+    def get_n_parameters(self):
+        """
+        Returns the number of parameters
+        """
+        return self._n_parameters
+
+
+class GaussianLogLikelihoodKnownSigma(BaseLikelihood):
+    """
+    This class represents a Gaussian Log Likelihood with a known sigma,
+    which assumes that the data follows a Gaussian distribution and computes
+    the log-likelihood of observed data under this assumption.
+
+    Attributes:
+        _offset (float): Precomputed offset value for the log-likelihood function.
+        _multip (float): Precomputed multiplier applied to the sum of squared residuals.
+    """
+
+    def __init__(self, problem, sigma):
+        super(GaussianLogLikelihoodKnownSigma, self).__init__(problem, sigma)
+        if sigma is not None:
+            self.set_sigma(sigma)
+        self._offset = -0.5 * self._n_times * np.log(2 * np.pi / self.sigma0)
+        self._multip = -1 / (2.0 * self.sigma0**2)
+        self.sigma2 = self.sigma0**-2
+        self._dl = np.ones(self._n_parameters)
+
+    def _evaluate(self, x, grad=None):
+        """
+        Calls the problem.evaluate method and calculates
+        the log-likelihood
+        """
+        e = self._target - self.problem.evaluate(x)
+        return np.sum(self._offset + self._multip * np.sum(e**2, axis=0))
+
+    def _evaluateS1(self, x, grad=None):
+        """
+        Calls the problem.evaluateS1 method and calculates
+        the log-likelihood and its gradient
+        """
+
+        y, dy = self.problem.evaluateS1(x)
+        if len(y) < len(self._target):
+            likelihood = -np.float64(np.inf)
+            dl = self._dl * np.ones(self._n_parameters)
+        else:
+            dy = dy.reshape(
+                (
+                    self._n_times,
+                    self.n_outputs,
+                    self._n_parameters,
+                )
+            )
+            e = self._target - y
+            likelihood = np.sum(self._offset + self._multip * np.sum(e**2, axis=0))
+            dl = np.sum((self.sigma2 * np.sum((e.T * dy.T), axis=2)), axis=1)
+
+        return likelihood, dl
+
+
+class GaussianLogLikelihood(BaseLikelihood):
+    """
+    This class represents a Gaussian Log Likelihood, which assumes that the
+    data follows a Gaussian distribution and computes the log-likelihood of
+    observed data under this assumption.
+
+    Attributes:
+        _logpi (float): Precomputed offset value for the log-likelihood function.
+    """
+
+    def __init__(self, problem):
+        super(GaussianLogLikelihood, self).__init__(problem)
+        self._logpi = -0.5 * self._n_times * np.log(2 * np.pi)
+        self._dl = np.ones(self._n_parameters + self.n_outputs)
+
+    def _evaluate(self, x, grad=None):
+        """
+        Evaluates the Gaussian log-likelihood for the given parameters.
+
+        Args:
+            x (array_like): The parameters for which to evaluate the log-likelihood.
+                The last `self.n_outputs` elements are assumed to be the
+                standard deviations of the Gaussian distributions.
+
+        Returns:
+            float: The log-likelihood value, or -inf if the standard deviations
+                are non-positive.
+ """ + sigma = np.asarray(x[-self.n_outputs :]) + + if np.any(sigma <= 0): + return -np.inf + + e = self._target - self.problem.evaluate(x[: -self.n_outputs]) + return np.sum( + self._logpi + - self._n_times * np.log(sigma) + - np.sum(e**2, axis=0) / (2.0 * sigma**2) + ) + + def _evaluateS1(self, x, grad=None): + """ + Calls the problem.evaluateS1 method and calculates + the log-likelihood + """ + sigma = np.asarray(x[-self.n_outputs :]) + + if np.any(sigma <= 0): + return -np.inf, self._dl + + y, dy = self.problem.evaluateS1(x[: -self.n_outputs]) + if len(y) < len(self._target): + likelihood = -np.float64(np.inf) + dl = self._dl + else: + dy = dy.reshape( + ( + self._n_times, + self.n_outputs, + self._n_parameters, + ) + ) + e = self._target - y + likelihood = self._evaluate(x) + dl = np.sum((sigma**-(2.0) * np.sum((e.T * dy.T), axis=2)), axis=1) + + # Add sigma gradient to dl + dsigma = -self._n_times / sigma + sigma**-(3.0) * np.sum(e**2, axis=0) + dl = np.concatenate((dl, dsigma)) + + return likelihood, dl diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 4c572121a..a13381aec 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,3 +1,7 @@ +from pybop import BaseProblem +import numpy as np + + class BaseCost: """ Base class for defining cost functions. @@ -22,28 +26,39 @@ class BaseCost: Initial standard deviation around ``x0``. Either a scalar value (one standard deviation for all coordinates) or an array with one entry per dimension. Not all methods will use this information. - n_parameters : int + _n_parameters : int The number of parameters in the model. n_outputs : int The number of outputs in the model. """ - def __init__(self, problem): + def __init__(self, problem=None, sigma=None): self.problem = problem self.x0 = None self.bounds = None - self.sigma0 = None - if problem is not None: + self.sigma0 = sigma + self._minimising = True + if isinstance(self.problem, BaseProblem): self._target = problem._target self.x0 = problem.x0 self.bounds = problem.bounds - self.sigma0 = problem.sigma0 - self.n_parameters = problem.n_parameters self.n_outputs = problem.n_outputs + self._n_parameters = problem.n_parameters + self.sigma0 = sigma or problem.sigma0 or np.zeros(self._n_parameters) + + @property + def n_parameters(self): + return self._n_parameters def __call__(self, x, grad=None): """ Call the evaluate function for a given set of parameters. + """ + return self.evaluate(x, grad) + + def evaluate(self, x, grad=None): + """ + Call the evaluate function for a given set of parameters. Parameters ---------- @@ -64,7 +79,10 @@ def __call__(self, x, grad=None): If an error occurs during the calculation of the cost. """ try: - return self._evaluate(x, grad) + if self._minimising: + return self._evaluate(x, grad) + else: # minimise the negative cost + return -self._evaluate(x, grad) except NotImplementedError as e: raise e @@ -119,7 +137,11 @@ def evaluateS1(self, x): If an error occurs during the calculation of the cost or gradient. 
""" try: - return self._evaluateS1(x) + if self._minimising: + return self._evaluateS1(x) + else: # minimise the negative cost + L, dl = self._evaluateS1(x) + return -L, -dl except NotImplementedError as e: raise e diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py index 8db3111c6..a0ebc664f 100644 --- a/pybop/costs/fitting_costs.py +++ b/pybop/costs/fitting_costs.py @@ -19,6 +19,9 @@ class RootMeanSquaredError(BaseCost): def __init__(self, problem): super(RootMeanSquaredError, self).__init__(problem) + # Default fail gradient + self._de = 1.0 + def _evaluate(self, x, grad=None): """ Calculate the root mean square error for a given set of parameters. @@ -67,13 +70,13 @@ def _evaluateS1(self, x): y, dy = self.problem.evaluateS1(x) if len(y) < len(self._target): e = np.float64(np.inf) - de = self._de * np.ones(self.n_parameters) + de = self._de * np.ones(self._n_parameters) else: dy = dy.reshape( ( self.problem.n_time_data, self.n_outputs, - self.n_parameters, + self._n_parameters, ) ) r = y - self._target @@ -84,6 +87,21 @@ def _evaluateS1(self, x): return e, de.flatten() + def set_fail_gradient(self, de): + """ + Set the fail gradient to a specified value. + + The fail gradient is used if an error occurs during the calculation + of the gradient. This method allows updating the default gradient value. + + Parameters + ---------- + de : float + The new fail gradient value to be used. + """ + de = float(de) + self._de = de + class SumSquaredError(BaseCost): """ @@ -159,13 +177,13 @@ def _evaluateS1(self, x): y, dy = self.problem.evaluateS1(x) if len(y) < len(self._target): e = np.float64(np.inf) - de = self._de * np.ones(self.n_parameters) + de = self._de * np.ones(self._n_parameters) else: dy = dy.reshape( ( self.problem.n_time_data, self.n_outputs, - self.n_parameters, + self._n_parameters, ) ) r = y - self._target diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index 78c82d3ed..c4a1f8df9 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -36,7 +36,13 @@ def x0(self): def init_soc(self, request): return request.param - @pytest.fixture(params=[pybop.RootMeanSquaredError, pybop.SumSquaredError]) + @pytest.fixture( + params=[ + pybop.GaussianLogLikelihoodKnownSigma, + pybop.RootMeanSquaredError, + pybop.SumSquaredError, + ] + ) def cost_class(self, request): return request.param @@ -57,7 +63,10 @@ def spm_cost(self, parameters, model, x0, cost_class, init_soc): problem = pybop.FittingProblem( model, parameters, dataset, signal=signal, init_soc=init_soc ) - return cost_class(problem) + if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]: + return cost_class(problem, sigma=[0.01, 0.01]) + else: + return cost_class(problem) @pytest.mark.parametrize( "optimiser", @@ -124,7 +133,8 @@ def test_spm_optimisers(self, optimiser, spm_cost, x0): x, final_cost = parameterisation.run() # Assertions - np.testing.assert_allclose(final_cost, 0, atol=1e-2) + if not isinstance(spm_cost, pybop.GaussianLogLikelihoodKnownSigma): + np.testing.assert_allclose(final_cost, 0, atol=1e-2) np.testing.assert_allclose(x, x0, atol=5e-2) @pytest.fixture diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index aa127ffd4..49e3d4ee7 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -66,13 +66,17 @@ def problem(self, model, parameters, dataset, signal, x0, request): return problem @pytest.fixture( - params=[pybop.RootMeanSquaredError, pybop.SumSquaredError, 
diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py
index aa127ffd4..49e3d4ee7 100644
--- a/tests/unit/test_cost.py
+++ b/tests/unit/test_cost.py
@@ -66,13 +66,17 @@ def problem(self, model, parameters, dataset, signal, x0, request):
         return problem
 
     @pytest.fixture(
-        params=[pybop.RootMeanSquaredError, pybop.SumSquaredError, pybop.ObserverCost]
+        params=[
+            pybop.RootMeanSquaredError,
+            pybop.SumSquaredError,
+            pybop.ObserverCost,
+        ]
     )
     def cost(self, problem, request):
         cls = request.param
-        if cls == pybop.RootMeanSquaredError or cls == pybop.SumSquaredError:
+        if cls in [pybop.SumSquaredError, pybop.RootMeanSquaredError]:
             return cls(problem)
-        elif cls == pybop.ObserverCost:
+        elif cls in [pybop.ObserverCost]:
             inputs = {p.name: problem.x0[i] for i, p in enumerate(problem.parameters)}
             state = problem._model.reinit(inputs)
             n = len(state)
@@ -133,15 +137,15 @@ def test_costs(self, cost):
         with pytest.warns(UserWarning) as record:
             cost([1.1])
 
+        # Test option setting
+        cost.set_fail_gradient(1)
+
         if isinstance(cost, pybop.SumSquaredError):
             e, de = cost.evaluateS1([0.5])
 
             assert type(e) == np.float64
             assert type(de) == np.ndarray
 
-        # Test option setting
-        cost.set_fail_gradient(1)
-
         # Test exception for non-numeric inputs
         with pytest.raises(ValueError):
             cost.evaluateS1(["StringInputShouldNotWork"])
diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py
new file mode 100644
index 000000000..02db1e87f
--- /dev/null
+++ b/tests/unit/test_likelihoods.py
@@ -0,0 +1,134 @@
+from __future__ import annotations
+import pytest
+import pybop
+import numpy as np
+
+
+class TestLikelihoods:
+    """
+    Class for likelihood unit tests
+    """
+
+    @pytest.fixture
+    def model(self):
+        return pybop.lithium_ion.SPM()
+
+    @pytest.fixture
+    def parameters(self):
+        return [
+            pybop.Parameter(
+                "Negative electrode active material volume fraction",
+                prior=pybop.Gaussian(0.5, 0.01),
+                bounds=[0.375, 0.625],
+            ),
+        ]
+
+    @pytest.fixture
+    def experiment(self):
+        return pybop.Experiment(
+            [
+                ("Discharge at 1C for 10 minutes (20 second period)"),
+            ]
+        )
+
+    @pytest.fixture
+    def x0(self):
+        return np.array([0.52])
+
+    @pytest.fixture
+    def dataset(self, model, experiment, x0):
+        model.parameter_set = model.pybamm_model.default_parameter_values
+        model.parameter_set.update(
+            {
+                "Negative electrode active material volume fraction": x0[0],
+            }
+        )
+        solution = model.predict(experiment=experiment)
+        return pybop.Dataset(
+            {
+                "Time [s]": solution["Time [s]"].data,
+                "Current function [A]": solution["Current [A]"].data,
+                "Voltage [V]": solution["Terminal voltage [V]"].data,
+            }
+        )
+
+    @pytest.fixture
+    def signal(self):
+        return "Voltage [V]"
+
+    @pytest.fixture()
+    def problem(self, model, parameters, dataset, signal, x0):
+        problem = pybop.FittingProblem(
+            model, parameters, dataset, signal=signal, x0=x0, init_soc=1.0
+        )
+        return problem
+
+    @pytest.mark.unit
+    def test_base_likelihood_init(self, problem):
+        likelihood = pybop.BaseLikelihood(problem, sigma=np.array([0.2]))
+        assert likelihood.problem == problem
+        assert likelihood.n_outputs == 1
+        assert likelihood._n_times == problem.n_time_data
+        assert np.array_equal(likelihood.get_sigma(), np.array([0.2]))
+        assert likelihood.x0 == problem.x0
+        assert likelihood.bounds == problem.bounds
+        assert likelihood._n_parameters == 1
+        assert np.array_equal(likelihood._target, problem._target)
+
+    @pytest.mark.unit
+    def test_base_likelihood_call_raises_not_implemented_error(self, problem):
+        likelihood = pybop.BaseLikelihood(problem)
+        with pytest.raises(NotImplementedError):
+            likelihood(np.array([0.5, 0.5]))
+
+    @pytest.mark.unit
+    def test_base_likelihood_set_get_sigma(self, problem):
+        likelihood = pybop.BaseLikelihood(problem)
+        likelihood.set_sigma(np.array([0.3]))
+        assert np.array_equal(likelihood.get_sigma(), np.array([0.3]))
+
+    @pytest.mark.unit
+    def test_base_likelihood_set_sigma_raises_value_error_for_negative_sigma(
+        self, problem
+    ):
+        likelihood = pybop.BaseLikelihood(problem)
+        with pytest.raises(ValueError):
+            likelihood.set_sigma(np.array([-0.2]))
+
+    @pytest.mark.unit
+    def test_base_likelihood_get_n_parameters(self, problem):
+        likelihood = pybop.BaseLikelihood(problem)
+        assert likelihood.get_n_parameters() == 1
+
+    @pytest.mark.unit
+    def test_base_likelihood_n_parameters_property(self, problem):
+        likelihood = pybop.BaseLikelihood(problem)
+        assert likelihood.n_parameters == 1
+
+    @pytest.mark.unit
+    def test_gaussian_log_likelihood_known_sigma(self, problem):
+        likelihood = pybop.GaussianLogLikelihoodKnownSigma(
+            problem, sigma=np.array([1.0])
+        )
+        result = likelihood(np.array([0.5]))
+        grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5]))
+        assert isinstance(result, float)
+        np.testing.assert_allclose(result, grad_result, atol=1e-5)
+        assert np.all(grad_likelihood <= 0)
+
+    @pytest.mark.unit
+    def test_gaussian_log_likelihood(self, problem):
+        likelihood = pybop.GaussianLogLikelihood(problem)
+        result = likelihood(np.array([0.5, 0.5]))
+        grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5]))
+        assert isinstance(result, float)
+        np.testing.assert_allclose(result, grad_result, atol=1e-5)
+        assert np.all(grad_likelihood <= 0)
+
+    @pytest.mark.unit
+    def test_gaussian_log_likelihood_call_returns_negative_inf_for_non_positive_sigma(
+        self, problem
+    ):
+        likelihood = pybop.GaussianLogLikelihood(problem)
+        result = likelihood(np.array([-0.5]))
+        assert result == -np.inf
diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py
index a7c733090..e0c077c36 100644
--- a/tests/unit/test_optimisation.py
+++ b/tests/unit/test_optimisation.py
@@ -111,12 +111,15 @@ def test_halting(self, cost):
         # Test max unchanged iterations
         optim = pybop.Optimisation(cost=cost, optimiser=pybop.GradientDescent)
         optim.set_max_unchanged_iterations(1)
+        optim.set_min_iterations(1)
         x, __ = optim.run()
         assert optim._iterations == 2
 
-        # Test invalid maximum values
+        # Test invalid values
         with pytest.raises(ValueError):
             optim.set_max_evaluations(-1)
+        with pytest.raises(ValueError):
+            optim.set_min_iterations(-1)
         with pytest.raises(ValueError):
             optim.set_max_unchanged_iterations(-1)
         with pytest.raises(ValueError):
diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py
index 4ba611a96..430dc925c 100644
--- a/tests/unit/test_standalone.py
+++ b/tests/unit/test_standalone.py
@@ -17,7 +17,7 @@ def test_standalone(self):
         opt = pybop.Optimisation(cost=cost, optimiser=pybop.SciPyDifferentialEvolution)
         x, final_cost = opt.run()
 
-        assert len(opt.x0) == opt.n_parameters
+        assert len(opt.x0) == opt._n_parameters
 
         np.testing.assert_allclose(x, 0, atol=1e-2)
         np.testing.assert_allclose(final_cost, 42, atol=1e-2)
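Finally, a rough consistency check one could run on top of this change set (not included in the PR): compare the analytic gradient from `evaluateS1` against a finite difference of the likelihood itself. The two-parameter `problem` is assumed to be the one built in examples/scripts/spm_MLE.py, and the sigma and parameter values are illustrative:

import numpy as np
import pybop

# Sketch only: finite-difference check of the known-sigma likelihood gradient.
likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=[0.005])
x = np.array([0.63, 0.51])
l0, dl = likelihood.evaluateS1(x)

h = 1e-6
l1 = likelihood(x + np.array([h, 0.0]))
print(dl[0], (l1 - l0) / h)  # analytic vs finite-difference gradient for the first parameter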