diff --git a/CHANGELOG.md b/CHANGELOG.md
index 73d3f53c..c5022403 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
 ## Features
 
+- [#321](https://github.com/pybop-team/PyBOP/pull/321) - Introduces a `BasePrior` class for the prior distributions and adds a `problem.sample_initial_conditions` method to improve the stability of the `SciPyMinimize` optimiser.
 - [#249](https://github.com/pybop-team/PyBOP/pull/249) - Add WeppnerHuggins model and GITT example.
 - [#304](https://github.com/pybop-team/PyBOP/pull/304) - Decreases the testing suite completion time.
 - [#301](https://github.com/pybop-team/PyBOP/pull/301) - Updates default echem solver to "fast with events" mode.
 
@@ -19,6 +20,7 @@ codesigned binaries and source distributions via `sigstore-python`.
 ## Bug Fixes
 
+- [#321](https://github.com/pybop-team/PyBOP/pull/321) - Improves the stability of `tests/integration/test_spm_parameterisations.py`, adds the `flaky` pytest plugin, and adds a `test_thevenin_parameterisation.py` integration test.
 - [#330](https://github.com/pybop-team/PyBOP/issues/330) - Fixes implementation of default plotting options.
 - [#317](https://github.com/pybop-team/PyBOP/pull/317) - Installs seed packages into `nox` sessions, ensuring that scheduled tests can pass.
 - [#308](https://github.com/pybop-team/PyBOP/pull/308) - Enables testing on both macOS Intel and macOS ARM (Silicon) runners and fixes the scheduled tests.
diff --git a/pybop/__init__.py b/pybop/__init__.py
index 034dcb4a..fdef8c81 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -118,7 +118,7 @@
 #
 from .parameters.parameter import Parameter
 from .parameters.parameter_set import ParameterSet
-from .parameters.priors import Gaussian, Uniform, Exponential
+from .parameters.priors import BasePrior, Gaussian, Uniform, Exponential
 
 #
diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py
index 185a09ab..dd51237b 100644
--- a/pybop/optimisers/scipy_optimisers.py
+++ b/pybop/optimisers/scipy_optimisers.py
@@ -25,6 +25,7 @@ def __init__(self, method=None, bounds=None, maxiter=None, tol=1e-5):
         super().__init__()
         self.method = method
         self.bounds = bounds
+        self.num_resamples = 40
         self.tol = tol
         self.options = {}
         self._max_iterations = maxiter
@@ -56,11 +57,21 @@ def _runoptimise(self, cost_function, x0):
         def callback(x):
             self.log.append([x])
 
-        # Scale the cost function and eliminate nan values
+        # Check x0 and resample from the priors if the initial cost is infinite
         self.cost0 = cost_function(x0)
-        self.inf_count = 0
         if np.isinf(self.cost0):
-            raise Exception("The initial parameter values return an infinite cost.")
+            for i in range(1, self.num_resamples + 1):
+                x0 = cost_function.problem.sample_initial_conditions(seed=i)
+                self.cost0 = cost_function(x0)
+                if not np.isinf(self.cost0):
+                    break
+            if np.isinf(self.cost0):
+                raise ValueError(
+                    "The initial parameter values return an infinite cost."
+                )
+
+        # Scale the cost function and eliminate nan values
+        self.inf_count = 0
 
         def cost_wrapper(x):
             cost = cost_function(x) / self.cost0
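For clarity, the resampling branch above retries the prior draw with an incrementing seed until a finite starting cost is found, and only raises once every attempt has failed. A minimal standalone sketch of the same pattern (the function and argument names here are illustrative, not part of the PyBOP API):

```python
import numpy as np


def finite_initial_cost(cost, x0, sample, num_resamples=40):
    """Return (x0, cost0), redrawing x0 while cost(x0) is infinite."""
    cost0 = cost(x0)
    for seed in range(1, num_resamples + 1):
        if not np.isinf(cost0):
            break
        # e.g. sample = problem.sample_initial_conditions
        x0 = sample(seed=seed)
        cost0 = cost(x0)
    if np.isinf(cost0):
        raise ValueError("The initial parameter values return an infinite cost.")
    return x0, cost0
```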
""" - samples = self.prior.rvs(n_samples) + samples = self.prior.rvs(n_samples, random_state=random_state) # Constrain samples to be within bounds if self.bounds is not None: diff --git a/pybop/parameters/priors.py b/pybop/parameters/priors.py index bdb64eb2..10472e17 100644 --- a/pybop/parameters/priors.py +++ b/pybop/parameters/priors.py @@ -2,172 +2,169 @@ import scipy.stats as stats -class Gaussian: +class BasePrior: """ - Represents a Gaussian (normal) distribution with a given mean and standard deviation. + A base class for defining prior distributions. - This class provides methods to calculate the probability density function (pdf), - the logarithm of the pdf, and to generate random variates (rvs) from the distribution. + This class provides a foundation for implementing various prior distributions. + It includes methods for calculating the probability density function (PDF), + log probability density function (log PDF), and generating random variates + from the distribution. - Parameters + Attributes ---------- - mean : float - The mean (mu) of the Gaussian distribution. - sigma : float - The standard deviation (sigma) of the Gaussian distribution. + prior : scipy.stats.rv_continuous + The underlying continuous random variable distribution. + loc : float + The location parameter of the distribution. + scale : float + The scale parameter of the distribution. """ - def __init__(self, mean, sigma): - self.name = "Gaussian" - self.mean = mean - self.sigma = sigma + def __init__(self): + pass def pdf(self, x): """ - Calculates the probability density function of the Gaussian distribution at x. + Calculates the probability density function (PDF) of the distribution at x. Parameters ---------- x : float - The point at which to evaluate the pdf. + The point(s) at which to evaluate the pdf. Returns ------- float The probability density function value at x. """ - return stats.norm.pdf(x, loc=self.mean, scale=self.sigma) + return self.prior.pdf(x, loc=self.loc, scale=self.scale) def logpdf(self, x): """ - Calculates the logarithm of the probability density function of the Gaussian distribution at x. + Calculates the logarithm of the probability density function of the distribution at x. Parameters ---------- x : float - The point at which to evaluate the log pdf. + The point(s) at which to evaluate the log pdf. Returns ------- float The logarithm of the probability density function value at x. """ - return stats.norm.logpdf(x, loc=self.mean, scale=self.sigma) + return self.prior.logpdf(x, loc=self.loc, scale=self.scale) - def rvs(self, size): + def rvs(self, size=1, random_state=None): """ - Generates random variates from the Gaussian distribution. + Generates random variates from the distribution. Parameters ---------- size : int The number of random variates to generate. + random_state : int, optional + The random state seed for reproducibility. Default is None. Returns ------- array_like - An array of random variates from the Gaussian distribution. + An array of random variates from the distribution. Raises ------ ValueError If the size parameter is negative. 
""" - if size < 0: - raise ValueError("size must be positive") - else: - return stats.norm.rvs(loc=self.mean, scale=self.sigma, size=size) + if not isinstance(size, (int, tuple)): + raise ValueError( + "size must be a positive integer or tuple of positive integers" + ) + if isinstance(size, int) and size < 1: + raise ValueError("size must be a positive integer") + if isinstance(size, tuple) and any(s < 1 for s in size): + raise ValueError("size must be a tuple of positive integers") + + return self.prior.rvs( + loc=self.loc, scale=self.scale, size=size, random_state=random_state + ) def __repr__(self): """ - Returns a string representation of the Gaussian object. + Returns a string representation of the object. """ - return f"{self.name}, mean: {self.mean}, sigma: {self.sigma}" - - -class Uniform: - """ - Represents a uniform distribution over a specified interval. + return f"{self.__class__.__name__}, loc: {self.loc}, scale: {self.scale}" - This class provides methods to calculate the pdf, the log pdf, and to generate - random variates from the distribution. - - Parameters - ---------- - lower : float - The lower bound of the distribution. - upper : float - The upper bound of the distribution. - """ - - def __init__(self, lower, upper): - self.name = "Uniform" - self.lower = lower - self.upper = upper - - def pdf(self, x): + @property + def mean(self): """ - Calculates the probability density function of the uniform distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the pdf. + Get the mean of the distribution. Returns ------- float - The probability density function value at x. + The mean of the distribution. """ - return stats.uniform.pdf(x, loc=self.lower, scale=self.upper - self.lower) + return self.loc - def logpdf(self, x): + @property + def sigma(self): """ - Calculates the logarithm of the pdf of the uniform distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the log pdf. + Get the standard deviation of the distribution. Returns ------- float - The log of the probability density function value at x. + The standard deviation of the distribution. """ - return stats.uniform.logpdf(x, loc=self.lower, scale=self.upper - self.lower) + return self.scale - def rvs(self, size): - """ - Generates random variates from the uniform distribution. - Parameters - ---------- - size : int - The number of random variates to generate. +class Gaussian(BasePrior): + """ + Represents a Gaussian (normal) distribution with a given mean and standard deviation. - Returns - ------- - array_like - An array of random variates from the uniform distribution. + This class provides methods to calculate the probability density function (pdf), + the logarithm of the pdf, and to generate random variates (rvs) from the distribution. - Raises - ------ - ValueError - If the size parameter is negative. - """ - if size < 0: - raise ValueError("size must be positive") - else: - return stats.uniform.rvs( - loc=self.lower, scale=self.upper - self.lower, size=size - ) + Parameters + ---------- + mean : float + The mean (mu) of the Gaussian distribution. + sigma : float + The standard deviation (sigma) of the Gaussian distribution. + """ - def __repr__(self): - """ - Returns a string representation of the Uniform object. 
- """ - return f"{self.name}, lower: {self.lower}, upper: {self.upper}" + def __init__(self, mean, sigma, random_state=None): + self.name = "Gaussian" + self.loc = mean + self.scale = sigma + self.prior = stats.norm + + +class Uniform(BasePrior): + """ + Represents a uniform distribution over a specified interval. + + This class provides methods to calculate the pdf, the log pdf, and to generate + random variates from the distribution. + + Parameters + ---------- + lower : float + The lower bound of the distribution. + upper : float + The upper bound of the distribution. + """ + + def __init__(self, lower, upper, random_state=None): + self.name = "Uniform" + self.lower = lower + self.upper = upper + self.loc = lower + self.scale = upper - lower + self.prior = stats.uniform @property def mean(self): @@ -184,7 +181,7 @@ def sigma(self): return (self.upper - self.lower) / (2 * np.sqrt(3)) -class Exponential: +class Exponential(BasePrior): """ Represents an exponential distribution with a specified scale parameter. @@ -197,82 +194,8 @@ class Exponential: The scale parameter (lambda) of the exponential distribution. """ - def __init__(self, scale): + def __init__(self, scale, loc=0, random_state=None): self.name = "Exponential" + self.loc = loc self.scale = scale - - def pdf(self, x): - """ - Calculates the probability density function of the exponential distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the pdf. - - Returns - ------- - float - The probability density function value at x. - """ - return stats.expon.pdf(x, scale=self.scale) - - def logpdf(self, x): - """ - Calculates the logarithm of the pdf of the exponential distribution at x. - - Parameters - ---------- - x : float - The point at which to evaluate the log pdf. - - Returns - ------- - float - The log of the probability density function value at x. - """ - return stats.expon.logpdf(x, scale=self.scale) - - def rvs(self, size): - """ - Generates random variates from the exponential distribution. - - Parameters - ---------- - size : int - The number of random variates to generate. - - Returns - ------- - array_like - An array of random variates from the exponential distribution. - - Raises - ------ - ValueError - If the size parameter is negative. - """ - if size < 0: - raise ValueError("size must be positive") - else: - return stats.expon.rvs(scale=self.scale, size=size) - - def __repr__(self): - """ - Returns a string representation of the Uniform object. - """ - return f"{self.name}, scale: {self.scale}" - - @property - def mean(self): - """ - Returns the mean of the distribution. - """ - return self.scale - - @property - def sigma(self): - """ - Returns the standard deviation of the distribution. 
- """ - return self.scale + self.prior = stats.expon diff --git a/pybop/problems/base_problem.py b/pybop/problems/base_problem.py index 37a6af25..530bad10 100644 --- a/pybop/problems/base_problem.py +++ b/pybop/problems/base_problem.py @@ -80,18 +80,47 @@ def __init__( if not all_have_sigma: self.sigma0 = None - # Sample from prior for x0 - if x0 is None: - self.x0 = np.zeros(self.n_parameters) - for i, param in enumerate(self.parameters): - self.x0[i] = param.rvs(1) - elif len(x0) != self.n_parameters: + # Set initial conditions + if self.x0 is None: + self.x0 = self.sample_initial_conditions() + elif len(self.x0) != self.n_parameters: raise ValueError("x0 dimensions do not match number of parameters") # Add the initial values to the parameter definitions for i, param in enumerate(self.parameters): param.update(initial_value=self.x0[i]) + def sample_initial_conditions( + self, num_samples: int = 1, seed: int = None + ) -> np.ndarray: + """ + Sample initial conditions for the problem. + + Parameters + ---------- + num_samples : int, optional + The number of initial condition samples to generate (default is 1). + seed : int, optional + The seed value for the random number generator (default is None). + + Returns + ------- + np.ndarray + An array of initial conditions for the problem. Each row corresponds to a sample. + """ + # Create a local random generator + rng = np.random.default_rng(seed) + + # Initialize + x0 = np.zeros((num_samples, self.n_parameters)) + + # Vectorized sampling from the parameters + for j, param in enumerate(self.parameters): + x0[:, j] = param.rvs(num_samples, random_state=rng) + + # Return a flattened array if only one sample, otherwise return the 2D array + return x0[0] if num_samples == 1 else x0 + def evaluate(self, x): """ Evaluate the model with the given parameters and return the signal. diff --git a/pyproject.toml b/pyproject.toml index 14ec41be..bf6080e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ dev = [ "pytest-cov", "pytest-mock", "pytest-xdist", + "flaky", "ruff", ] scifem = [ diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_spm_parameterisations.py similarity index 85% rename from tests/integration/test_parameterisations.py rename to tests/integration/test_spm_parameterisations.py index 32bd4033..02ca9ef8 100644 --- a/tests/integration/test_parameterisations.py +++ b/tests/integration/test_spm_parameterisations.py @@ -1,10 +1,12 @@ import numpy as np import pytest +from flaky import flaky +from pybamm import __version__ as pybamm_version import pybop -class TestModelParameterisation: +class Test_SPM_Parameterisation: """ A class to test the model parameterisation methods. 
""" @@ -25,12 +27,12 @@ def parameters(self): return [ pybop.Parameter( "Negative electrode active material volume fraction", - prior=pybop.Gaussian(0.55, 0.05), - bounds=[0.375, 0.75], + prior=pybop.Uniform(0.4, 0.7), + bounds=[0.375, 0.725], ), pybop.Parameter( "Positive electrode active material volume fraction", - prior=pybop.Gaussian(0.55, 0.05), + prior=pybop.Uniform(0.4, 0.7), # no bounds ), ] @@ -80,24 +82,25 @@ def spm_costs(self, model, parameters, cost_class, init_soc): @pytest.mark.parametrize( "optimiser", [ - pybop.SciPyMinimize, pybop.SciPyDifferentialEvolution, pybop.Adam, pybop.CMAES, - pybop.GradientDescent, pybop.IRPropMin, pybop.NelderMead, - pybop.PSO, pybop.SNES, pybop.XNES, ], ) + @flaky(max_runs=3, min_passes=1) @pytest.mark.integration def test_spm_optimisers(self, optimiser, spm_costs): + x0 = spm_costs.x0 # Some optimisers require a complete set of bounds - if optimiser in [pybop.SciPyDifferentialEvolution, pybop.PSO]: + if optimiser in [ + pybop.SciPyDifferentialEvolution, + ]: spm_costs.problem.parameters[1].set_bounds( - [0.3, 0.8] + [0.375, 0.725] ) # Large range to ensure IC within bounds bounds = {"lower": [], "upper": []} for param in spm_costs.problem.parameters: @@ -110,30 +113,19 @@ def test_spm_optimisers(self, optimiser, spm_costs): parameterisation = pybop.Optimisation( cost=spm_costs, optimiser=optimiser, sigma0=0.05 ) + parameterisation.set_max_unchanged_iterations(iterations=35, threshold=1e-5) parameterisation.set_max_iterations(125) - - initial_cost = parameterisation.cost(spm_costs.x0) - - if optimiser in [pybop.GradientDescent]: - if isinstance( - spm_costs, (pybop.GaussianLogLikelihoodKnownSigma, pybop.MAP) - ): - parameterisation.optimiser.set_learning_rate(1.8e-5) - else: - parameterisation.optimiser.set_learning_rate(0.015) - x, final_cost = parameterisation.run() - - elif optimiser in [pybop.SciPyMinimize]: - parameterisation.cost.problem.model.allow_infeasible_solutions = False - x, final_cost = parameterisation.run() - - else: - x, final_cost = parameterisation.run() + initial_cost = parameterisation.cost(x0) + x, final_cost = parameterisation.run() # Assertions - assert initial_cost > final_cost - np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost + if pybamm_version <= "23.9": + np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) + else: + np.testing.assert_allclose(x, self.ground_truth, atol=1.75e-2) @pytest.fixture def spm_two_signal_cost(self, parameters, model, cost_class): @@ -176,10 +168,11 @@ def spm_two_signal_cost(self, parameters, model, cost_class): ) @pytest.mark.integration def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): + x0 = spm_two_signal_cost.x0 # Some optimisers require a complete set of bounds if multi_optimiser in [pybop.SciPyDifferentialEvolution]: spm_two_signal_cost.problem.parameters[1].set_bounds( - [0.3, 0.8] + [0.375, 0.725] ) # Large range to ensure IC within bounds bounds = {"lower": [], "upper": []} for param in spm_two_signal_cost.problem.parameters: @@ -199,7 +192,8 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost): x, final_cost = parameterisation.run() # Assertions - assert initial_cost > final_cost + if not np.allclose(x0, self.ground_truth, atol=1e-5): + assert initial_cost > final_cost np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2) @pytest.mark.parametrize("init_soc", [0.4, 0.6]) @@ -248,8 +242,8 @@ def getdata(self, model, x, 
diff --git a/tests/integration/test_thevenin_parameterisation.py b/tests/integration/test_thevenin_parameterisation.py
new file mode 100644
index 00000000..2f609a0e
--- /dev/null
+++ b/tests/integration/test_thevenin_parameterisation.py
@@ -0,0 +1,104 @@
+import numpy as np
+import pytest
+from flaky import flaky
+
+import pybop
+
+
+class TestTheveninParameterisation:
+    """
+    A class to test optimisers that are prone to flakiness on a simple model.
+    """
+
+    @pytest.fixture(autouse=True)
+    def setup(self):
+        # Set random seed for reproducibility
+        np.random.seed(0)
+        self.ground_truth = np.array([0.05, 0.05]) + np.random.normal(
+            loc=0.0, scale=0.01, size=2
+        )
+
+    @pytest.fixture
+    def model(self):
+        parameter_set = pybop.ParameterSet(
+            json_path="examples/scripts/parameters/initial_ecm_parameters.json"
+        )
+        parameter_set.import_parameters()
+        return pybop.empirical.Thevenin(parameter_set=parameter_set)
+
+    @pytest.fixture
+    def parameters(self):
+        return [
+            pybop.Parameter(
+                "R0 [Ohm]",
+                prior=pybop.Gaussian(0.05, 0.01),
+                bounds=[0.01, 0.1],
+            ),
+            pybop.Parameter(
+                "R1 [Ohm]",
+                prior=pybop.Gaussian(0.05, 0.01),
+                bounds=[0.01, 0.1],
+            ),
+        ]
+
+    @pytest.fixture(params=[pybop.RootMeanSquaredError, pybop.SumSquaredError])
+    def cost_class(self, request):
+        return request.param
+
+    @pytest.fixture
+    def cost(self, model, parameters, cost_class):
+        # Form dataset
+        solution = self.getdata(model, self.ground_truth)
+        dataset = pybop.Dataset(
+            {
+                "Time [s]": solution["Time [s]"].data,
+                "Current function [A]": solution["Current [A]"].data,
+                "Voltage [V]": solution["Voltage [V]"].data,
+            }
+        )
+
+        # Define the cost to optimise
+        problem = pybop.FittingProblem(model, parameters, dataset)
+        return cost_class(problem)
+
+    @pytest.mark.parametrize(
+        "optimiser",
+        [pybop.SciPyMinimize, pybop.GradientDescent, pybop.PSO],
+    )
+    @flaky(max_runs=8, min_passes=1)  # These can be very flaky
+    @pytest.mark.integration
+    def test_optimisers_on_simple_model(self, optimiser, cost):
+        x0 = cost.x0
+        if optimiser in [pybop.GradientDescent]:
+            parameterisation = pybop.Optimisation(
+                cost=cost, optimiser=optimiser, sigma0=2.5e-4
+            )
+        else:
+            parameterisation = pybop.Optimisation(
+                cost=cost, optimiser=optimiser, sigma0=0.03
+            )
+
+        parameterisation.set_max_unchanged_iterations(iterations=55, threshold=1e-5)
+        parameterisation.set_max_iterations(250)
+        initial_cost = parameterisation.cost(x0)
+        x, final_cost = parameterisation.run()
+
+        # Assertions
+        if not np.allclose(x0, self.ground_truth, atol=1e-5):
+            assert initial_cost > final_cost
+        np.testing.assert_allclose(x, self.ground_truth, atol=1e-2)
+
+    def getdata(self, model, x):
+        model.parameter_set.update(
+            {
+                "R0 [Ohm]": x[0],
+                "R1 [Ohm]": x[1],
+            }
+        )
+        experiment = pybop.Experiment(
+            [
+                ("Discharge at 0.5C for 2 minutes (1 second period)",),
+            ]
+        )
+        sim = model.predict(experiment=experiment)
+        return sim
diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py
index 54674c95..3bf1c6d4 100644
--- a/tests/unit/test_optimisation.py
+++ b/tests/unit/test_optimisation.py
@@ -54,7 +54,6 @@ def cost(self, model, one_parameter, dataset):
             model,
             one_parameter,
             dataset,
-            signal=["Voltage [V]"],
         )
         return pybop.SumSquaredError(problem)
 
@@ -64,7 +63,6 @@ def two_param_cost(self, model, two_parameters, dataset):
             model,
             two_parameters,
             dataset,
-            signal=["Voltage [V]"],
         )
         return pybop.SumSquaredError(problem)
 
@@ -142,6 +140,44 @@ def test_prior_sampling(self, cost):
 
         assert opt.x0 <= 0.62 and opt.x0 >= 0.58
 
+    @pytest.mark.unit
+    @pytest.mark.parametrize(
+        "mean, sigma, expect_exception",
+        [
+            (0.85, 0.2, False),
+            (0.85, 0.001, True),
+        ],
+    )
+    def test_scipy_prior_resampling(
+        self, model, dataset, mean, sigma, expect_exception
+    ):
+        # Set up the parameter with a Gaussian prior
+        parameter = pybop.Parameter(
+            "Negative electrode active material volume fraction",
+            prior=pybop.Gaussian(mean, sigma),
+            bounds=[0.55, 0.95],
+        )
+
+        # Define the problem and cost
+        problem = pybop.FittingProblem(model, [parameter], dataset)
+        cost = pybop.SumSquaredError(problem)
+
+        # Create the optimisation class with infeasible solutions disabled
+        opt = pybop.Optimisation(
+            cost=cost, optimiser=pybop.SciPyMinimize, allow_infeasible_solutions=False
+        )
+        opt.set_max_iterations(1)
+
+        # If sigma is small, expect a ValueError from the inability to resample a finite cost
+        if expect_exception:
+            with pytest.raises(
+                ValueError,
+                match="The initial parameter values return an infinite cost.",
+            ):
+                opt.run()
+        else:
+            opt.run()
+
     @pytest.mark.unit
     def test_halting(self, cost):
         # Test max evalutions
diff --git a/tests/unit/test_parameters.py b/tests/unit/test_parameters.py
index 2ec2fcd5..19649cb9 100644
--- a/tests/unit/test_parameters.py
+++ b/tests/unit/test_parameters.py
@@ -31,7 +31,7 @@ def test_parameter_construction(self, parameter):
     def test_parameter_repr(self, parameter):
         assert (
             repr(parameter)
-            == "Parameter: Negative electrode active material volume fraction \n Prior: Gaussian, mean: 0.6, sigma: 0.02 \n Bounds: [0.375, 0.7] \n Value: 0.6"
+            == "Parameter: Negative electrode active material volume fraction \n Prior: Gaussian, loc: 0.6, scale: 0.02 \n Bounds: [0.375, 0.7] \n Value: 0.6"
         )
 
     @pytest.mark.unit
"Uniform, loc: 0, scale: 1" + assert repr(Exponential) == "Exponential, loc: 0, scale: 1" @pytest.mark.unit def test_invalid_size(self, Gaussian, Uniform, Exponential):