From c7df8c7d0777e188bddac00ff9a46ac05f651fad Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 10 May 2024 10:14:55 +0100 Subject: [PATCH 01/12] fixes parameterisation integration test --- tests/integration/test_parameterisations.py | 49 ++++++++++++--------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index 32bd4033..f880e33f 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -25,12 +25,12 @@ def parameters(self): return [ pybop.Parameter( "Negative electrode active material volume fraction", - prior=pybop.Gaussian(0.55, 0.05), + prior=pybop.Uniform(0.35, 0.75), bounds=[0.375, 0.75], ), pybop.Parameter( "Positive electrode active material volume fraction", - prior=pybop.Gaussian(0.55, 0.05), + prior=pybop.Uniform(0.35, 0.75), # no bounds ), ] @@ -95,7 +95,10 @@ def spm_costs(self, model, parameters, cost_class, init_soc): @pytest.mark.integration def test_spm_optimisers(self, optimiser, spm_costs): # Some optimisers require a complete set of bounds - if optimiser in [pybop.SciPyDifferentialEvolution, pybop.PSO]: + if optimiser in [ + pybop.SciPyDifferentialEvolution, + pybop.PSO, + ]: spm_costs.problem.parameters[1].set_bounds( [0.3, 0.8] ) # Large range to ensure IC within bounds @@ -107,29 +110,33 @@ def test_spm_optimisers(self, optimiser, spm_costs): spm_costs.bounds = bounds # Test each optimiser - parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.05 - ) - parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5) - parameterisation.set_max_iterations(125) - - initial_cost = parameterisation.cost(spm_costs.x0) - if optimiser in [pybop.GradientDescent]: if isinstance( spm_costs, (pybop.GaussianLogLikelihoodKnownSigma, pybop.MAP) ): - parameterisation.optimiser.set_learning_rate(1.8e-5) + parameterisation = pybop.Optimisation( + cost=spm_costs, optimiser=optimiser, sigma0=5e-5 + ) else: - parameterisation.optimiser.set_learning_rate(0.015) - x, final_cost = parameterisation.run() - + parameterisation = pybop.Optimisation( + cost=spm_costs, optimiser=optimiser, sigma0=0.02 + ) elif optimiser in [pybop.SciPyMinimize]: - parameterisation.cost.problem.model.allow_infeasible_solutions = False - x, final_cost = parameterisation.run() - + parameterisation = pybop.Optimisation( + cost=spm_costs, + optimiser=optimiser, + sigma0=0.05, + allow_infeasible_solutions=False, + ) else: - x, final_cost = parameterisation.run() + parameterisation = pybop.Optimisation( + cost=spm_costs, optimiser=optimiser, sigma0=0.05 + ) + + parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5) + parameterisation.set_max_iterations(125) + initial_cost = parameterisation.cost(spm_costs.x0) + x, final_cost = parameterisation.run() # Assertions assert initial_cost > final_cost @@ -248,8 +255,8 @@ def getdata(self, model, x, init_soc): experiment = pybop.Experiment( [ ( - "Discharge at 0.5C for 3 minutes (2 second period)", - "Charge at 0.5C for 3 minutes (2 second period)", + "Discharge at 0.5C for 6 minutes (4 second period)", + "Charge at 0.5C for 6 minutes (4 second period)", ), ] * 2 From 46d5c46a5685a236170bba9f8e759649f9ee3d19 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 10 May 2024 12:04:14 +0100 Subject: [PATCH 02/12] tighten assertions, split scipy.minimize --- tests/integration/test_parameterisations.py | 5 ++++- 1 file changed, 4 insertions(+), 1 
deletion(-)

diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index f880e33f..3c0d1b95 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -140,7 +140,10 @@ def test_spm_optimisers(self, optimiser, spm_costs):
 
         # Assertions
         assert initial_cost > final_cost
-        np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
+        if optimiser in [pybop.SciPyMinimize]:
+            np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
+        else:
+            np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2)
 
     @pytest.fixture
     def spm_two_signal_cost(self, parameters, model, cost_class):

From 993c8afcf8c6de715ef87a8e5d57d4dba45b3341 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 10 May 2024 15:36:04 +0100
Subject: [PATCH 03/12] splits integration asserts per pybamm version

---
 tests/integration/test_parameterisations.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index 3c0d1b95..12587765 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -1,5 +1,6 @@
 import numpy as np
 import pytest
+from pybamm import __version__ as pybamm_version
 
 import pybop
 
@@ -140,10 +141,17 @@ def test_spm_optimisers(self, optimiser, spm_costs):
 
         # Assertions
         assert initial_cost > final_cost
-        if optimiser in [pybop.SciPyMinimize]:
-            np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
+
+        if pybamm_version <= "23.9":
+            if optimiser in [pybop.SciPyMinimize]:
+                np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2)
+            else:
+                np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2)
         else:
-            np.testing.assert_allclose(x, self.ground_truth, atol=1.5e-2)
+            if optimiser in [pybop.SciPyMinimize]:
+                np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
+            else:
+                np.testing.assert_allclose(x, self.ground_truth, atol=1.75e-2)
 
     @pytest.fixture
     def spm_two_signal_cost(self, parameters, model, cost_class):

From 5943c0a2a29ac4115c3e501e4eb306bbd3e1db4d Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 10 May 2024 16:38:50 +0100
Subject: [PATCH 04/12] shift infeasible integration test to pybop.Adam

---
 tests/integration/test_parameterisations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index 12587765..a6b1a2b0 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -122,7 +122,7 @@ def test_spm_optimisers(self, optimiser, spm_costs):
                 parameterisation = pybop.Optimisation(
                     cost=spm_costs, optimiser=optimiser, sigma0=0.02
                 )
-        elif optimiser in [pybop.SciPyMinimize]:
+        elif optimiser in [pybop.Adam] and spm_costs in [pybop.SumSquaredError]:
             parameterisation = pybop.Optimisation(
                 cost=spm_costs,
                 optimiser=optimiser,

From 8576c8a087e50f3d028f7313d0fad36cfc495f5e Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Sun, 12 May 2024 20:02:53 +0100
Subject: [PATCH 05/12] test(integr): Adds x0 resampling to SciPyMinimize

Adds a method to sample initial points within the problem class,
enabling SciPyMinimize to resample x0 when the initial cost is
infinite, and updates the prior classes to share a common base.
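In outline, the resampling behaviour this patch adds works like the sketch below. This is a minimal standalone illustration, not part of the diff: the free-standing helper name `find_finite_x0` is an assumption for readability, while the real loop sits inside `SciPyMinimize._runoptimise` in the diff that follows.

```python
import numpy as np

def find_finite_x0(cost_function, x0, problem, num_resamples=40):
    # Evaluate the user-supplied starting point first
    cost0 = cost_function(x0)
    if np.isinf(cost0):
        # Redraw candidates from the parameter priors; each seed gives a
        # reproducible draw, stopping at the first finite cost
        for i in range(1, num_resamples):
            x0 = problem.sample_initial_conditions(seed=i)
            cost0 = cost_function(x0)
            if not np.isinf(cost0):
                break
    if np.isinf(cost0):
        raise ValueError("The initial parameter values return an infinite cost.")
    return x0, cost0
```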
--- pybop/optimisers/scipy_optimisers.py | 15 +- pybop/parameters/parameter.py | 4 +- pybop/parameters/priors.py | 257 +++++++------------- pybop/problems/base_problem.py | 41 +++- tests/integration/test_parameterisations.py | 6 +- tests/unit/test_parameters.py | 2 +- tests/unit/test_priors.py | 8 +- 7 files changed, 146 insertions(+), 187 deletions(-) diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py index 185a09ab..6354ca1e 100644 --- a/pybop/optimisers/scipy_optimisers.py +++ b/pybop/optimisers/scipy_optimisers.py @@ -25,6 +25,7 @@ def __init__(self, method=None, bounds=None, maxiter=None, tol=1e-5): super().__init__() self.method = method self.bounds = bounds + self.num_resamples = 40 self.tol = tol self.options = {} self._max_iterations = maxiter @@ -56,11 +57,19 @@ def _runoptimise(self, cost_function, x0): def callback(x): self.log.append([x]) - # Scale the cost function and eliminate nan values + # Check x0 and resample if required self.cost0 = cost_function(x0) - self.inf_count = 0 if np.isinf(self.cost0): - raise Exception("The initial parameter values return an infinite cost.") + for i in range(1, self.num_resamples): + x0 = cost_function.problem.sample_initial_conditions(seed=i) + self.cost0 = cost_function(x0) + if not np.isinf(self.cost0): + break + if np.isinf(self.cost0): + raise Exception("The initial parameter values return an infinite cost.") + + # Scale the cost function and eliminate nan values + self.inf_count = 0 def cost_wrapper(x): cost = cost_function(x) / self.cost0 diff --git a/pybop/parameters/parameter.py b/pybop/parameters/parameter.py index c1b4ad1c..52b700bb 100644 --- a/pybop/parameters/parameter.py +++ b/pybop/parameters/parameter.py @@ -42,7 +42,7 @@ def __init__( self.set_bounds(bounds) self.margin = 1e-4 - def rvs(self, n_samples): + def rvs(self, n_samples, random_state=None): """ Draw random samples from the parameter's prior distribution. @@ -59,7 +59,7 @@ def rvs(self, n_samples): array-like An array of samples drawn from the prior distribution within the parameter's bounds. """ - samples = self.prior.rvs(n_samples) + samples = self.prior.rvs(n_samples, random_state=random_state) # Constrain samples to be within bounds if self.bounds is not None: diff --git a/pybop/parameters/priors.py b/pybop/parameters/priors.py index bdb64eb2..9ed144cc 100644 --- a/pybop/parameters/priors.py +++ b/pybop/parameters/priors.py @@ -2,41 +2,43 @@ import scipy.stats as stats -class Gaussian: +class BasePrior: """ - Represents a Gaussian (normal) distribution with a given mean and standard deviation. + A base class for defining prior distributions. - This class provides methods to calculate the probability density function (pdf), - the logarithm of the pdf, and to generate random variates (rvs) from the distribution. + This class provides a foundation for implementing various prior distributions. + It includes methods for calculating the probability density function (PDF), + log probability density function (log PDF), and generating random variates + from the distribution. - Parameters + Attributes ---------- - mean : float - The mean (mu) of the Gaussian distribution. - sigma : float - The standard deviation (sigma) of the Gaussian distribution. + prior : scipy.stats.rv_continuous + The underlying continuous random variable distribution. + loc : float + The location parameter of the distribution. + scale : float + The scale parameter of the distribution. 
""" - def __init__(self, mean, sigma): - self.name = "Gaussian" - self.mean = mean - self.sigma = sigma + def __init__(self): + pass def pdf(self, x): """ - Calculates the probability density function of the Gaussian distribution at x. + Calculates the probability density function (PDF) of the Gaussian distribution at x. Parameters ---------- x : float - The point at which to evaluate the pdf. + The point(s) at which to evaluate the pdf. Returns ------- float The probability density function value at x. """ - return stats.norm.pdf(x, loc=self.mean, scale=self.sigma) + return self.prior.pdf(x, loc=self.loc, scale=self.scale) def logpdf(self, x): """ @@ -45,16 +47,16 @@ def logpdf(self, x): Parameters ---------- x : float - The point at which to evaluate the log pdf. + The point(s) at which to evaluate the log pdf. Returns ------- float The logarithm of the probability density function value at x. """ - return stats.norm.logpdf(x, loc=self.mean, scale=self.sigma) + return self.prior.logpdf(x, loc=self.loc, scale=self.scale) - def rvs(self, size): + def rvs(self, size=1, random_state=None): """ Generates random variates from the Gaussian distribution. @@ -73,101 +75,94 @@ def rvs(self, size): ValueError If the size parameter is negative. """ - if size < 0: - raise ValueError("size must be positive") - else: - return stats.norm.rvs(loc=self.mean, scale=self.sigma, size=size) + if not isinstance(size, (int, tuple)): + raise ValueError( + "size must be a positive integer or tuple of positive integers" + ) + if isinstance(size, int) and size < 1: + raise ValueError("size must be a positive integer") + if isinstance(size, tuple) and any(s < 1 for s in size): + raise ValueError("size must be a tuple of positive integers") + + return self.prior.rvs( + loc=self.loc, scale=self.scale, size=size, random_state=random_state + ) def __repr__(self): """ Returns a string representation of the Gaussian object. """ - return f"{self.name}, mean: {self.mean}, sigma: {self.sigma}" - - -class Uniform: - """ - Represents a uniform distribution over a specified interval. + return f"{self.__class__.__name__}, loc: {self.loc}, scale: {self.scale}" - This class provides methods to calculate the pdf, the log pdf, and to generate - random variates from the distribution. - - Parameters - ---------- - lower : float - The lower bound of the distribution. - upper : float - The upper bound of the distribution. - """ - - def __init__(self, lower, upper): - self.name = "Uniform" - self.lower = lower - self.upper = upper - - def pdf(self, x): + @property + def mean(self): """ - Calculates the probability density function of the uniform distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the pdf. + Get the mean of the distribution. Returns ------- float - The probability density function value at x. + The mean of the distribution. """ - return stats.uniform.pdf(x, loc=self.lower, scale=self.upper - self.lower) + return self.loc - def logpdf(self, x): + @property + def sigma(self): """ - Calculates the logarithm of the pdf of the uniform distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the log pdf. + Get the standard deviation of the distribution. Returns ------- float - The log of the probability density function value at x. + The standard deviation of the distribution. 
""" - return stats.uniform.logpdf(x, loc=self.lower, scale=self.upper - self.lower) + return self.scale - def rvs(self, size): - """ - Generates random variates from the uniform distribution. - Parameters - ---------- - size : int - The number of random variates to generate. +class Gaussian(BasePrior): + """ + Represents a Gaussian (normal) distribution with a given mean and standard deviation. - Returns - ------- - array_like - An array of random variates from the uniform distribution. + This class provides methods to calculate the probability density function (pdf), + the logarithm of the pdf, and to generate random variates (rvs) from the distribution. - Raises - ------ - ValueError - If the size parameter is negative. - """ - if size < 0: - raise ValueError("size must be positive") - else: - return stats.uniform.rvs( - loc=self.lower, scale=self.upper - self.lower, size=size - ) + Parameters + ---------- + mean : float + The mean (mu) of the Gaussian distribution. + sigma : float + The standard deviation (sigma) of the Gaussian distribution. + """ - def __repr__(self): - """ - Returns a string representation of the Uniform object. - """ - return f"{self.name}, lower: {self.lower}, upper: {self.upper}" + def __init__(self, mean, sigma, random_state=None): + self.name = "Gaussian" + self.loc = mean + self.scale = sigma + self.prior = stats.norm + + +class Uniform(BasePrior): + """ + Represents a uniform distribution over a specified interval. + + This class provides methods to calculate the pdf, the log pdf, and to generate + random variates from the distribution. + + Parameters + ---------- + lower : float + The lower bound of the distribution. + upper : float + The upper bound of the distribution. + """ + + def __init__(self, lower, upper, random_state=None): + self.name = "Uniform" + self.lower = lower + self.upper = upper + self.loc = lower + self.scale = upper - lower + self.prior = stats.uniform @property def mean(self): @@ -184,7 +179,7 @@ def sigma(self): return (self.upper - self.lower) / (2 * np.sqrt(3)) -class Exponential: +class Exponential(BasePrior): """ Represents an exponential distribution with a specified scale parameter. @@ -197,82 +192,8 @@ class Exponential: The scale parameter (lambda) of the exponential distribution. """ - def __init__(self, scale): + def __init__(self, scale, loc=0, random_state=None): self.name = "Exponential" + self.loc = loc self.scale = scale - - def pdf(self, x): - """ - Calculates the probability density function of the exponential distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the pdf. - - Returns - ------- - float - The probability density function value at x. - """ - return stats.expon.pdf(x, scale=self.scale) - - def logpdf(self, x): - """ - Calculates the logarithm of the pdf of the exponential distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the log pdf. - - Returns - ------- - float - The log of the probability density function value at x. - """ - return stats.expon.logpdf(x, scale=self.scale) - - def rvs(self, size): - """ - Generates random variates from the exponential distribution. - - Parameters - ---------- - size : int - The number of random variates to generate. - - Returns - ------- - array_like - An array of random variates from the exponential distribution. - - Raises - ------ - ValueError - If the size parameter is negative. 
- """ - if size < 0: - raise ValueError("size must be positive") - else: - return stats.expon.rvs(scale=self.scale, size=size) - - def __repr__(self): - """ - Returns a string representation of the Uniform object. - """ - return f"{self.name}, scale: {self.scale}" - - @property - def mean(self): - """ - Returns the mean of the distribution. - """ - return self.scale - - @property - def sigma(self): - """ - Returns the standard deviation of the distribution. - """ - return self.scale + self.prior = stats.expon diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index 37a6af25..530bad10 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -80,18 +80,47 @@ def __init__( if not all_have_sigma: self.sigma0 = None - # Sample from prior for x0 - if x0 is None: - self.x0 = np.zeros(self.n_parameters) - for i, param in enumerate(self.parameters): - self.x0[i] = param.rvs(1) - elif len(x0) != self.n_parameters: + # Set initial conditions + if self.x0 is None: + self.x0 = self.sample_initial_conditions() + elif len(self.x0) != self.n_parameters: raise ValueError("x0 dimensions do not match number of parameters") # Add the initial values to the parameter definitions for i, param in enumerate(self.parameters): param.update(initial_value=self.x0[i]) + def sample_initial_conditions( + self, num_samples: int = 1, seed: int = None + ) -> np.ndarray: + """ + Sample initial conditions for the problem. + + Parameters + ---------- + num_samples : int, optional + The number of initial condition samples to generate (default is 1). + seed : int, optional + The seed value for the random number generator (default is None). + + Returns + ------- + np.ndarray + An array of initial conditions for the problem. Each row corresponds to a sample. + """ + # Create a local random generator + rng = np.random.default_rng(seed) + + # Initialize + x0 = np.zeros((num_samples, self.n_parameters)) + + # Vectorized sampling from the parameters + for j, param in enumerate(self.parameters): + x0[:, j] = param.rvs(num_samples, random_state=rng) + + # Return a flattened array if only one sample, otherwise return the 2D array + return x0[0] if num_samples == 1 else x0 + def evaluate(self, x): """ Evaluate the model with the given parameters and return the signal. 
diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index a6b1a2b0..61ff6bdf 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -122,7 +122,7 @@ def test_spm_optimisers(self, optimiser, spm_costs): parameterisation = pybop.Optimisation( cost=spm_costs, optimiser=optimiser, sigma0=0.02 ) - elif optimiser in [pybop.Adam] and spm_costs in [pybop.SumSquaredError]: + elif optimiser in [pybop.SciPyMinimize]: parameterisation = pybop.Optimisation( cost=spm_costs, optimiser=optimiser, @@ -143,8 +143,8 @@ def test_spm_optimisers(self, optimiser, spm_costs): assert initial_cost > final_cost if pybamm_version <= "23.9": - if optimiser in [pybop.SciPyMinimize]: - np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2) + if optimiser in [pybop.GradientDescent, pybop.SciPyMinimize]: + np.testing.assert_allclose(x, self.ground_truth, atol=4.0e-2) else: np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2) else: diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py index 2ec2fcd5..19649cb9 100644 --- a/tests/unit/test_parameters.py +++ b/tests/unit/test_parameters.py @@ -31,7 +31,7 @@ def test_parameter_construction(self, parameter): def test_parameter_repr(self, parameter): assert ( repr(parameter) - == "Parameter: Negative electrode active material volume fraction \n Prior: Gaussian, mean: 0.6, sigma: 0.02 \n Bounds: [0.375, 0.7] \n Value: 0.6" + == "Parameter: Negative electrode active material volume fraction \n Prior: Gaussian, loc: 0.6, scale: 0.02 \n Bounds: [0.375, 0.7] \n Value: 0.6" ) @pytest.mark.unit diff --git a/tests/unit/test_priors.py b/tests/unit/test_priors.py index 4edaea58..823f56ed 100644 --- a/tests/unit/test_priors.py +++ b/tests/unit/test_priors.py @@ -38,7 +38,7 @@ def test_priors(self, Gaussian, Uniform, Exponential): np.testing.assert_allclose( Uniform.sigma, (Uniform.upper - Uniform.lower) / (2 * np.sqrt(3)), atol=1e-8 ) - assert Exponential.mean == Exponential.scale + assert Exponential.mean == Exponential.loc assert Exponential.sigma == Exponential.scale @pytest.mark.unit @@ -63,9 +63,9 @@ def test_exponential_rvs(self, Exponential): @pytest.mark.unit def test_repr(self, Gaussian, Uniform, Exponential): - assert repr(Gaussian) == "Gaussian, mean: 0.5, sigma: 1" - assert repr(Uniform) == "Uniform, lower: 0, upper: 1" - assert repr(Exponential) == "Exponential, scale: 1" + assert repr(Gaussian) == "Gaussian, loc: 0.5, scale: 1" + assert repr(Uniform) == "Uniform, loc: 0, scale: 1" + assert repr(Exponential) == "Exponential, loc: 0, scale: 1" @pytest.mark.unit def test_invalid_size(self, Gaussian, Uniform, Exponential): From 44c7bb897c5c6e36be4250b3d8c0e27f60c004f1 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Mon, 13 May 2024 11:18:03 +0100 Subject: [PATCH 06/12] test(integ): Adjusts sigma0, adds coverage for prior updt Add tests for the prior cls update and SciPyMinimize resampling. 
Also updates sigma0 for GradientDescent and loosens assertion --- pybop/__init__.py | 2 +- pybop/optimisers/scipy_optimisers.py | 4 ++- tests/integration/test_parameterisations.py | 4 +-- tests/unit/test_optimisation.py | 37 +++++++++++++++++++-- tests/unit/test_priors.py | 12 +++++++ 5 files changed, 53 insertions(+), 6 deletions(-) diff --git a/pybop/__init__.py b/pybop/__init__.py index c25abdc7..048d805e 100644 --- a/pybop/__init__.py +++ b/pybop/__init__.py @@ -119,7 +119,7 @@ # from .parameters.parameter import Parameter from .parameters.parameter_set import ParameterSet -from .parameters.priors import Gaussian, Uniform, Exponential +from .parameters.priors import BasePrior, Gaussian, Uniform, Exponential # diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py index 6354ca1e..dd51237b 100644 --- a/pybop/optimisers/scipy_optimisers.py +++ b/pybop/optimisers/scipy_optimisers.py @@ -66,7 +66,9 @@ def callback(x): if not np.isinf(self.cost0): break if np.isinf(self.cost0): - raise Exception("The initial parameter values return an infinite cost.") + raise ValueError( + "The initial parameter values return an infinite cost." + ) # Scale the cost function and eliminate nan values self.inf_count = 0 diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index 61ff6bdf..e7fe54eb 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -120,7 +120,7 @@ def test_spm_optimisers(self, optimiser, spm_costs): ) else: parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.02 + cost=spm_costs, optimiser=optimiser, sigma0=0.035 ) elif optimiser in [pybop.SciPyMinimize]: parameterisation = pybop.Optimisation( @@ -148,7 +148,7 @@ def test_spm_optimisers(self, optimiser, spm_costs): else: np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2) else: - if optimiser in [pybop.SciPyMinimize]: + if optimiser in [pybop.GradientDescent, pybop.SciPyMinimize]: np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) else: np.testing.assert_allclose(x, self.ground_truth, atol=1.75e-2) diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 54674c95..01efe33e 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -54,7 +54,6 @@ def cost(self, model, one_parameter, dataset): model, one_parameter, dataset, - signal=["Voltage [V]"], ) return pybop.SumSquaredError(problem) @@ -64,7 +63,6 @@ def two_param_cost(self, model, two_parameters, dataset): model, two_parameters, dataset, - signal=["Voltage [V]"], ) return pybop.SumSquaredError(problem) @@ -142,6 +140,41 @@ def test_prior_sampling(self, cost): assert opt.x0 <= 0.62 and opt.x0 >= 0.58 + @pytest.mark.unit + @pytest.mark.parametrize( + "mean, sigma, expect_exception", + [ + (0.85, 0.2, False), + (0.85, 0.001, True), + ], + ) + def test_scipy_prior_resampling( + self, model, dataset, mean, sigma, expect_exception + ): + # Set up the parameter with a Gaussian prior + parameter = pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(mean, sigma), + bounds=[0.55, 0.95], + ) + + # Define the problem and cost + problem = pybop.FittingProblem(model, [parameter], dataset) + cost = pybop.SumSquaredError(problem) + + # Create the optimisation class with infeasible solutions disabled + opt = pybop.Optimisation( + cost=cost, optimiser=pybop.SciPyMinimize, allow_infeasible_solutions=False + ) + 
opt.set_max_iterations(1)
+
+        # If small sigma, expect a ValueError due to the inability to resample a non-np.inf cost
+        if expect_exception:
+            with pytest.raises(ValueError):
+                opt.run()
+        else:
+            opt.run()
+
     @pytest.mark.unit
     def test_halting(self, cost):
         # Test max evaluations

diff --git a/tests/unit/test_priors.py b/tests/unit/test_priors.py
index 823f56ed..b773049f 100644
--- a/tests/unit/test_priors.py
+++ b/tests/unit/test_priors.py
@@ -21,6 +21,11 @@ def Uniform(self):
     def Exponential(self):
         return pybop.Exponential(scale=1)
 
+    @pytest.mark.unit
+    def test_base_prior(self):
+        base = pybop.BasePrior()
+        assert isinstance(base, pybop.BasePrior)
+
     @pytest.mark.unit
     def test_priors(self, Gaussian, Uniform, Exponential):
         # Test pdf
@@ -49,6 +54,13 @@ def test_gaussian_rvs(self, Gaussian):
         assert abs(mean - 0.5) < 0.2
         assert abs(std - 1) < 0.2
 
+    @pytest.mark.unit
+    def test_incorrect_rvs(self, Gaussian):
+        with pytest.raises(ValueError):
+            Gaussian.rvs(size="a")
+        with pytest.raises(ValueError):
+            Gaussian.rvs(size=(1, 2, -1))
+
     @pytest.mark.unit
     def test_uniform_rvs(self, Uniform):
         samples = Uniform.rvs(size=500)

From 9e6e4b6f9d39a3016c2ecb53efb15dd48a445070 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 13 May 2024 13:08:17 +0100
Subject: [PATCH 07/12] test: increase assert for PSO

---
 tests/integration/test_parameterisations.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index e7fe54eb..5acc17de 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -143,13 +143,13 @@ def test_spm_optimisers(self, optimiser, spm_costs):
         assert initial_cost > final_cost
 
         if pybamm_version <= "23.9":
-            if optimiser in [pybop.GradientDescent, pybop.SciPyMinimize]:
+            if optimiser in [pybop.GradientDescent, pybop.PSO, pybop.SciPyMinimize]:
                 np.testing.assert_allclose(x, self.ground_truth, atol=4.0e-2)
             else:
                 np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2)
         else:
-            if optimiser in [pybop.GradientDescent, pybop.SciPyMinimize]:
-                np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
+            if optimiser in [pybop.GradientDescent, pybop.PSO, pybop.SciPyMinimize]:
+                np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2)
             else:
                 np.testing.assert_allclose(x, self.ground_truth, atol=1.75e-2)

From c3bb6ecd464ffd533e67816c348411541091c308 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 13 May 2024 21:40:48 +0100
Subject: [PATCH 08/12] test: extend unchanged iterations

---
 tests/integration/test_parameterisations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index 5acc17de..48372eb5 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -134,7 +134,7 @@ def test_spm_optimisers(self, optimiser, spm_costs):
                 cost=spm_costs, optimiser=optimiser, sigma0=0.05
             )
 
-        parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5)
+        parameterisation.set_max_unchanged_iterations(iterations=55, threshold=1e-5)
         parameterisation.set_max_iterations(125)
         initial_cost = parameterisation.cost(spm_costs.x0)
         x, final_cost = parameterisation.run()

From 1eb817a16f457caa30166bb87ddb9013e7b0ec93 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Wed, 15 May 2024 16:10:50 +0100
Subject: [PATCH 09/12] fix: docstrings, tests, add changelog entry

---
 CHANGELOG.md | 2 ++
pybop/parameters/priors.py | 12 +++++++----- tests/unit/test_optimisation.py | 5 ++++- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e879450e..dbb561b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Features +- [#321](https://github.com/pybop-team/PyBOP/pull/321) - Updates Prior classes with BaseClass, adds a `problem.sample_initial_conditions` method to improve stability of SciPy.Minimize optimiser. - [#304](https://github.com/pybop-team/PyBOP/pull/304) - Decreases the testing suite completion time. - [#301](https://github.com/pybop-team/PyBOP/pull/301) - Updates default echem solver to "fast with events" mode. - [#251](https://github.com/pybop-team/PyBOP/pull/251) - Increment PyBaMM > v23.5, remove redundant tests within integration tests, increment citation version, fix examples with incorrect model definitions. @@ -18,6 +19,7 @@ codesigned binaries and source distributions via `sigstore-python`. ## Bug Fixes +- [#321](https://github.com/pybop-team/PyBOP/pull/321) - Improves `integration/test_parameterisation` stability. - [#317](https://github.com/pybop-team/PyBOP/pull/317) - Installs seed packages into `nox` sessions, ensuring that scheduled tests can pass. - [#308](https://github.com/pybop-team/PyBOP/pull/308) - Enables testing on both macOS Intel and macOS ARM (Silicon) runners and fixes the scheduled tests. - [#299](https://github.com/pybop-team/PyBOP/pull/299) - Bugfix multiprocessing support for Linux, MacOS, Windows (WSL) and improves coverage. diff --git a/pybop/parameters/priors.py b/pybop/parameters/priors.py index 9ed144cc..10472e17 100644 --- a/pybop/parameters/priors.py +++ b/pybop/parameters/priors.py @@ -26,7 +26,7 @@ def __init__(self): def pdf(self, x): """ - Calculates the probability density function (PDF) of the Gaussian distribution at x. + Calculates the probability density function (PDF) of the distribution at x. Parameters ---------- @@ -42,7 +42,7 @@ def pdf(self, x): def logpdf(self, x): """ - Calculates the logarithm of the probability density function of the Gaussian distribution at x. + Calculates the logarithm of the probability density function of the distribution at x. Parameters ---------- @@ -58,17 +58,19 @@ def logpdf(self, x): def rvs(self, size=1, random_state=None): """ - Generates random variates from the Gaussian distribution. + Generates random variates from the distribution. Parameters ---------- size : int The number of random variates to generate. + random_state : int, optional + The random state seed for reproducibility. Default is None. Returns ------- array_like - An array of random variates from the Gaussian distribution. + An array of random variates from the distribution. Raises ------ @@ -90,7 +92,7 @@ def rvs(self, size=1, random_state=None): def __repr__(self): """ - Returns a string representation of the Gaussian object. + Returns a string representation of the object. 
""" return f"{self.__class__.__name__}, loc: {self.loc}, scale: {self.scale}" diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 01efe33e..3bf1c6d4 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -170,7 +170,10 @@ def test_scipy_prior_resampling( # If small sigma, expect a ValueError due inability to resample a non np.inf cost if expect_exception: - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match="The initial parameter values return an infinite cost.", + ): opt.run() else: opt.run() From 0c1421caf6b85f0a9a0901be2102806771c22319 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 16 May 2024 18:05:36 +0100 Subject: [PATCH 10/12] fix(test): Shrink parameterisation test solution distribution This commit reduces the possible variance of solutions for the parameterisation test, it also updates hyper-parameters, defends against IC samples that equal the ground truth solution. The priors are also reduced to improve robustness --- tests/integration/test_parameterisations.py | 26 ++++++++++++--------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index 48372eb5..5ee78308 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -13,7 +13,7 @@ class TestModelParameterisation: @pytest.fixture(autouse=True) def setup(self): self.ground_truth = np.array([0.55, 0.55]) + np.random.normal( - loc=0.0, scale=0.05, size=2 + loc=0.0, scale=0.04, size=2 ) @pytest.fixture @@ -26,12 +26,12 @@ def parameters(self): return [ pybop.Parameter( "Negative electrode active material volume fraction", - prior=pybop.Uniform(0.35, 0.75), - bounds=[0.375, 0.75], + prior=pybop.Uniform(0.4, 0.7), + bounds=[0.375, 0.725], ), pybop.Parameter( "Positive electrode active material volume fraction", - prior=pybop.Uniform(0.35, 0.75), + prior=pybop.Uniform(0.4, 0.7), # no bounds ), ] @@ -95,13 +95,14 @@ def spm_costs(self, model, parameters, cost_class, init_soc): ) @pytest.mark.integration def test_spm_optimisers(self, optimiser, spm_costs): + x0 = spm_costs.x0 # Some optimisers require a complete set of bounds if optimiser in [ pybop.SciPyDifferentialEvolution, pybop.PSO, ]: spm_costs.problem.parameters[1].set_bounds( - [0.3, 0.8] + [0.375, 0.725] ) # Large range to ensure IC within bounds bounds = {"lower": [], "upper": []} for param in spm_costs.problem.parameters: @@ -120,7 +121,7 @@ def test_spm_optimisers(self, optimiser, spm_costs): ) else: parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.035 + cost=spm_costs, optimiser=optimiser, sigma0=0.02 ) elif optimiser in [pybop.SciPyMinimize]: parameterisation = pybop.Optimisation( @@ -134,13 +135,14 @@ def test_spm_optimisers(self, optimiser, spm_costs): cost=spm_costs, optimiser=optimiser, sigma0=0.05 ) - parameterisation.set_max_unchanged_iterations(iterations=55, threshold=1e-5) + parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5) parameterisation.set_max_iterations(125) - initial_cost = parameterisation.cost(spm_costs.x0) + initial_cost = parameterisation.cost(x0) x, final_cost = parameterisation.run() # Assertions - assert initial_cost > final_cost + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost if pybamm_version <= "23.9": if optimiser in [pybop.GradientDescent, pybop.PSO, pybop.SciPyMinimize]: @@ -194,10 +196,11 
@@ def spm_two_signal_cost(self, parameters, model, cost_class): ) @pytest.mark.integration def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): + x0 = spm_two_signal_cost.x0 # Some optimisers require a complete set of bounds if multi_optimiser in [pybop.SciPyDifferentialEvolution]: spm_two_signal_cost.problem.parameters[1].set_bounds( - [0.3, 0.8] + [0.375, 0.725] ) # Large range to ensure IC within bounds bounds = {"lower": [], "upper": []} for param in spm_two_signal_cost.problem.parameters: @@ -217,7 +220,8 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): x, final_cost = parameterisation.run() # Assertions - assert initial_cost > final_cost + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) @pytest.mark.parametrize("init_soc", [0.4, 0.6]) From 0af5a87cca770f36f2099e64677f158d2894a235 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 17 May 2024 11:21:59 +0100 Subject: [PATCH 11/12] fix(test): splits flaky optimisers to simple model, adds flaky plugin --- pyproject.toml | 1 + tests/integration/test_parameterisations.py | 44 ++------ .../test_thevenin_parameterisation.py | 104 ++++++++++++++++++ 3 files changed, 113 insertions(+), 36 deletions(-) create mode 100644 tests/integration/test_thevenin_parameterisation.py diff --git a/pyproject.toml b/pyproject.toml index 14ec41be..bf6080e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ dev = [ "pytest-cov", "pytest-mock", "pytest-xdist", + "flaky", "ruff", ] scifem = [ diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py index 5ee78308..341a9c41 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_parameterisations.py @@ -1,11 +1,12 @@ import numpy as np import pytest +from flaky import flaky from pybamm import __version__ as pybamm_version import pybop -class TestModelParameterisation: +class TestParameterisation: """ A class to test the model parameterisation methods. 
""" @@ -81,25 +82,22 @@ def spm_costs(self, model, parameters, cost_class, init_soc): @pytest.mark.parametrize( "optimiser", [ - pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution, pybop.Adam, pybop.CMAES, - pybop.GradientDescent, pybop.IRPropMin, pybop.NelderMead, - pybop.PSO, pybop.SNES, pybop.XNES, ], ) + @flaky(max_runs=3, min_passes=1) @pytest.mark.integration def test_spm_optimisers(self, optimiser, spm_costs): x0 = spm_costs.x0 # Some optimisers require a complete set of bounds if optimiser in [ pybop.SciPyDifferentialEvolution, - pybop.PSO, ]: spm_costs.problem.parameters[1].set_bounds( [0.375, 0.725] @@ -112,28 +110,9 @@ def test_spm_optimisers(self, optimiser, spm_costs): spm_costs.bounds = bounds # Test each optimiser - if optimiser in [pybop.GradientDescent]: - if isinstance( - spm_costs, (pybop.GaussianLogLikelihoodKnownSigma, pybop.MAP) - ): - parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=5e-5 - ) - else: - parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.02 - ) - elif optimiser in [pybop.SciPyMinimize]: - parameterisation = pybop.Optimisation( - cost=spm_costs, - optimiser=optimiser, - sigma0=0.05, - allow_infeasible_solutions=False, - ) - else: - parameterisation = pybop.Optimisation( - cost=spm_costs, optimiser=optimiser, sigma0=0.05 - ) + parameterisation = pybop.Optimisation( + cost=spm_costs, optimiser=optimiser, sigma0=0.05 + ) parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5) parameterisation.set_max_iterations(125) @@ -143,17 +122,10 @@ def test_spm_optimisers(self, optimiser, spm_costs): # Assertions if not np.allclose(x0, self.ground_truth, atol=1e-5): assert initial_cost > final_cost - if pybamm_version <= "23.9": - if optimiser in [pybop.GradientDescent, pybop.PSO, pybop.SciPyMinimize]: - np.testing.assert_allclose(x, self.ground_truth, atol=4.0e-2) - else: - np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2) + np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) else: - if optimiser in [pybop.GradientDescent, pybop.PSO, pybop.SciPyMinimize]: - np.testing.assert_allclose(x, self.ground_truth, atol=3.0e-2) - else: - np.testing.assert_allclose(x, self.ground_truth, atol=1.75e-2) + np.testing.assert_allclose(x, self.ground_truth, atol=1.75e-2) @pytest.fixture def spm_two_signal_cost(self, parameters, model, cost_class): diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py new file mode 100644 index 00000000..2f609a0e --- /dev/null +++ b/tests/integration/test_thevenin_parameterisation.py @@ -0,0 +1,104 @@ +import numpy as np +import pytest +from flaky import flaky + +import pybop + + +class TestTheveninParameterisation: + """ + A class to test the flaky optimisers on a simple model. 
+ """ + + @pytest.fixture(autouse=True) + def setup(self): + # Set random seed for reproducibility + np.random.seed(0) + self.ground_truth = np.array([0.05, 0.05]) + np.random.normal( + loc=0.0, scale=0.01, size=2 + ) + + @pytest.fixture + def model(self): + parameter_set = pybop.ParameterSet( + json_path="examples/scripts/parameters/initial_ecm_parameters.json" + ) + parameter_set.import_parameters() + return pybop.empirical.Thevenin(parameter_set=parameter_set) + + @pytest.fixture + def parameters(self): + return [ + pybop.Parameter( + "R0 [Ohm]", + prior=pybop.Gaussian(0.05, 0.01), + bounds=[0.01, 0.1], + ), + pybop.Parameter( + "R1 [Ohm]", + prior=pybop.Gaussian(0.05, 0.01), + bounds=[0.01, 0.1], + ), + ] + + @pytest.fixture(params=[pybop.RootMeanSquaredError, pybop.SumSquaredError]) + def cost_class(self, request): + return request.param + + @pytest.fixture + def cost(self, model, parameters, cost_class): + # Form dataset + solution = self.getdata(model, self.ground_truth) + dataset = pybop.Dataset( + { + "Time [s]": solution["Time [s]"].data, + "Current function [A]": solution["Current [A]"].data, + "Voltage [V]": solution["Voltage [V]"].data, + } + ) + + # Define the cost to optimise + problem = pybop.FittingProblem(model, parameters, dataset) + return cost_class(problem) + + @pytest.mark.parametrize( + "optimiser", + [pybop.SciPyMinimize, pybop.GradientDescent, pybop.PSO], + ) + @flaky(max_runs=8, min_passes=1) # These can be very flaky + @pytest.mark.integration + def test_optimisers_on_simple_model(self, optimiser, cost): + x0 = cost.x0 + if optimiser in [pybop.GradientDescent]: + parameterisation = pybop.Optimisation( + cost=cost, optimiser=optimiser, sigma0=2.5e-4 + ) + else: + parameterisation = pybop.Optimisation( + cost=cost, optimiser=optimiser, sigma0=0.03 + ) + + parameterisation.set_max_unchanged_iterations(iterations=55, threshold=1e-5) + parameterisation.set_max_iterations(250) + initial_cost = parameterisation.cost(x0) + x, final_cost = parameterisation.run() + + # Assertions + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost + np.testing.assert_allclose(x, self.ground_truth, atol=1e-2) + + def getdata(self, model, x): + model.parameter_set.update( + { + "R0 [Ohm]": x[0], + "R1 [Ohm]": x[1], + } + ) + experiment = pybop.Experiment( + [ + ("Discharge at 0.5C for 2 minutes (1 second period)",), + ] + ) + sim = model.predict(experiment=experiment) + return sim From 0d0e21452b87410028fb776f1ea9e7d1961603b5 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Fri, 17 May 2024 14:05:57 +0100 Subject: [PATCH 12/12] fix: name change for concistency, revert solution distribution shrink --- CHANGELOG.md | 2 +- ...est_parameterisations.py => test_spm_parameterisations.py} | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename tests/integration/{test_parameterisations.py => test_spm_parameterisations.py} (99%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98150d4e..c5022403 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ codesigned binaries and source distributions via `sigstore-python`. ## Bug Fixes -- [#321](https://github.com/pybop-team/PyBOP/pull/321) - Improves `integration/test_parameterisation` stability. +- [#321](https://github.com/pybop-team/PyBOP/pull/321) - Improves `integration/test_spm_parameterisation.py` stability, adds flakly pytest plugin, and `test_thevenin_parameterisation.py` integration test. 
+- [#321](https://github.com/pybop-team/PyBOP/pull/321) - Improves `integration/test_spm_parameterisation.py` stability, adds the flaky pytest plugin, and adds a `test_thevenin_parameterisation.py` integration test.
 - [#330](https://github.com/pybop-team/PyBOP/issues/330) - Fixes implementation of default plotting options.
 - [#317](https://github.com/pybop-team/PyBOP/pull/317) - Installs seed packages into `nox` sessions, ensuring that scheduled tests can pass.
 - [#308](https://github.com/pybop-team/PyBOP/pull/308) - Enables testing on both macOS Intel and macOS ARM (Silicon) runners and fixes the scheduled tests.

diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_spm_parameterisations.py
similarity index 99%
rename from tests/integration/test_parameterisations.py
rename to tests/integration/test_spm_parameterisations.py
index 341a9c41..02ca9ef8 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_spm_parameterisations.py
@@ -6,7 +6,7 @@
 import pybop
 
 
-class TestParameterisation:
+class Test_SPM_Parameterisation:
     """
     A class to test the model parameterisation methods.
     """
@@ -14,7 +14,7 @@ class Test_SPM_Parameterisation:
     @pytest.fixture(autouse=True)
     def setup(self):
         self.ground_truth = np.array([0.55, 0.55]) + np.random.normal(
-            loc=0.0, scale=0.04, size=2
+            loc=0.0, scale=0.05, size=2
         )
 
     @pytest.fixture
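Taken together, the series converges on retrying inherently noisy tests rather than loosening every tolerance. The retry pattern the flaky plugin enables can be seen in isolation below — a toy test, not part of the patches, whose assertion mirrors the `atol` checks used throughout the suite:

```python
import numpy as np
import pytest
from flaky import flaky

@flaky(max_runs=3, min_passes=1)  # rerun up to three times; one pass suffices
@pytest.mark.integration
def test_stochastic_estimate():
    # A noisy estimate compared against ground truth with a finite atol:
    # an unlucky draw can fail once and still pass on a flaky rerun
    rng = np.random.default_rng()
    estimate = 0.55 + rng.normal(loc=0.0, scale=0.01)
    np.testing.assert_allclose(estimate, 0.55, atol=2.5e-2)
```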