Make likelihoods a type of cost #230

Merged: 7 commits, Mar 8, 2024
Changes from all commits
4 changes: 2 additions & 2 deletions examples/standalone/cost.py
@@ -18,7 +18,7 @@ class StandaloneCost(pybop.BaseCost):
BaseCost interface.
x0 : array-like
The initial guess for the optimization problem, set to [4.2].
n_parameters : int
_n_parameters : int
The number of parameters in the model, which is 1 in this case.
bounds : dict
A dictionary containing the lower and upper bounds for the parameter,
@@ -40,7 +40,7 @@ def __init__(self, problem=None):
super().__init__(problem)

self.x0 = np.array([4.2])
self.n_parameters = len(self.x0)
self._n_parameters = len(self.x0)

self.bounds = dict(
lower=[-1],
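The rename to a private `_n_parameters` pairs with the read-only `n_parameters` property that this PR adds to `BaseCost` (see base_cost.py further down). A minimal sketch of that pattern, using an illustrative class rather than pybop code:

class MinimalCost:
    """Illustrative only: store the count privately, expose it read-only."""

    def __init__(self, x0):
        self.x0 = x0
        self._n_parameters = len(x0)

    @property
    def n_parameters(self):
        return self._n_parameters

cost = MinimalCost([4.2])
print(cost.n_parameters)  # -> 1, read through the property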
11 changes: 5 additions & 6 deletions pybop/__init__.py
@@ -28,11 +28,6 @@
#
from ._problem import BaseProblem, FittingProblem, DesignProblem

#
# Likelihood classes
#
from ._likelihoods import BaseLikelihood, GaussianLogLikelihood, GaussianLogLikelihoodKnownSigma

#
# Cost function class
#
@@ -41,13 +36,17 @@
RootMeanSquaredError,
SumSquaredError,
ObserverCost,
ProbabilityCost,
)
from .costs.design_costs import (
DesignCost,
GravimetricEnergyDensity,
VolumetricEnergyDensity,
)
from .costs._likelihoods import (
BaseLikelihood,
GaussianLogLikelihood,
GaussianLogLikelihoodKnownSigma,
)

#
# Dataset class
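With this reorganisation the likelihood classes live in pybop/costs/_likelihoods.py but are still re-exported from the top-level package, and they now sit inside the cost hierarchy. A quick sanity-check sketch of what the new import surface implies (assumes only that pybop is installed with this PR merged):

import pybop

# Top-level names are unchanged by the move into pybop/costs/_likelihoods.py.
assert issubclass(pybop.GaussianLogLikelihood, pybop.BaseLikelihood)
# The point of this PR: a likelihood is a type of cost.
assert issubclass(pybop.BaseLikelihood, pybop.BaseCost)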
21 changes: 8 additions & 13 deletions pybop/_optimisation.py
@@ -29,7 +29,7 @@
Initial parameter values for the optimization.
bounds : dict
Dictionary containing the parameter bounds with keys 'lower' and 'upper'.
n_parameters : int
_n_parameters : int
Number of parameters in the optimization problem.
sigma0 : float or sequence
Initial step size or standard deviation for the optimiser.
@@ -40,27 +40,24 @@
def __init__(
self,
cost,
Review comment (Member): I think we want users to overwrite the initial conditions, right? This modification should keep that open.

Suggested change:
-        cost,
+        cost,
+        x0=None,

x0=None,
optimiser=None,
sigma0=None,
verbose=False,
physical_viability=True,
allow_infeasible_solutions=True,
):
self.cost = cost
self.x0 = x0
self.x0 = cost.x0
Review comment (Member): As mentioned above. This keeps it open :)

Suggested change:
-        self.x0 = cost.x0
+        self.x0 = x0 or cost.x0
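As a side note on the suggestion above, the later hunk in this file catches the missing case explicitly (`if x0 is None: self.x0 = cost.x0`), which also sidesteps the ambiguous truth value that `x0 or cost.x0` would hit for multi-element NumPy arrays. A small standalone sketch of the difference (names here are illustrative, not pybop code):

import numpy as np

default_x0 = np.array([4.2])

def resolve_x0(x0=None):
    # Explicit None check: behaves the same for scalars, lists and arrays.
    return default_x0 if x0 is None else x0

print(resolve_x0())                       # -> [4.2], falls back to the default
print(resolve_x0(np.array([0.6, 0.5])))   # -> the user-supplied array
# By contrast, `x0 or default_x0` raises ValueError for multi-element arrays,
# because NumPy refuses to coerce them to a single boolean.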

self.optimiser = optimiser
self.verbose = verbose
self.bounds = cost.bounds
self.sigma0 = sigma0 or cost.sigma0
self.n_parameters = cost.n_parameters
self._n_parameters = cost._n_parameters
self.physical_viability = physical_viability
self.allow_infeasible_solutions = allow_infeasible_solutions
self.log = []

# Catch x0, and convert to pints vector
if x0 is None:
self.x0 = cost.x0
# Convert x0 to pints vector
self._x0 = pints.vector(self.x0)

# Set whether to allow infeasible locations
@@ -77,12 +74,10 @@
self._transformation = None

# Check if minimising or maximising
self._minimising = not isinstance(cost, pybop.BaseLikelihood)
if self._minimising:
self._function = self.cost
else:
self._function = pybop.ProbabilityCost(cost)
del cost
if isinstance(cost, pybop.BaseLikelihood):
self.cost._minimising = False

Codecov warning: added line pybop/_optimisation.py#L78 was not covered by tests.
self._minimising = self.cost._minimising
self._function = self.cost

# Construct Optimiser
self.pints = True
30 changes: 9 additions & 21 deletions pybop/_likelihoods.py → pybop/costs/_likelihoods.py
@@ -1,27 +1,19 @@
import numpy as np
from pybop.costs.base_cost import BaseCost


class BaseLikelihood:
class BaseLikelihood(BaseCost):
"""
Base class for likelihoods
"""

def __init__(self, problem, sigma=None):
self.problem = problem
super(BaseLikelihood, self).__init__(problem)
self._n_output = problem.n_outputs
self._n_times = problem.n_time_data
self.sigma0 = sigma or np.zeros(self._n_output)
self.x0 = problem.x0
self.bounds = problem.bounds
self._n_parameters = problem.n_parameters
self._target = problem._target

def __call__(self, x):
"""
Calls the problem.evaluate method and calculates
the log-likelihood
"""
raise NotImplementedError
self.log_likelihood = problem
Review comment (Member): This can be removed as it was only required for the ProbabilityCost() wrapper.

Suggested change:
-        self.log_likelihood = problem


def set_sigma(self, sigma):
"""
@@ -44,10 +36,6 @@ def get_n_parameters(self):
"""
return self._n_parameters

@property
def n_parameters(self):
return self._n_parameters


class GaussianLogLikelihoodKnownSigma(BaseLikelihood):
"""
@@ -68,15 +56,15 @@ def __init__(self, problem, sigma=None):
self.sigma2 = self.sigma0**-2
self._dl = np.ones(self._n_parameters)

def __call__(self, x):
def _evaluate(self, x, grad=None):
"""
Calls the problem.evaluate method and calculates
the log-likelihood
"""
e = self._target - self.problem.evaluate(x)
return np.sum(self._offset + self._multip * np.sum(e**2, axis=0))

def _evaluateS1(self, x):
def _evaluateS1(self, x, grad=None):
"""
Calls the problem.evaluateS1 method and calculates
the log-likelihood
@@ -116,7 +104,7 @@ def __init__(self, problem):
self._logpi = -0.5 * self._n_times * np.log(2 * np.pi)
self._dl = np.ones(self._n_parameters + self._n_output)

def __call__(self, x):
def _evaluate(self, x, grad=None):
"""
Evaluates the Gaussian log-likelihood for the given parameters.

@@ -140,7 +128,7 @@ def __call__(self, x):
- np.sum(e**2, axis=0) / (2.0 * sigma**2)
)

def _evaluateS1(self, x):
def _evaluateS1(self, x, grad=None):
"""
Calls the problem.evaluateS1 method and calculates
the log-likelihood
@@ -163,7 +151,7 @@ def _evaluateS1(self, x):
)
)
e = self._target - y
likelihood = self.__call__(x)
likelihood = self._evaluate(x)
dl = np.sum((sigma**-(2.0) * np.sum((e.T * dy.T), axis=2)), axis=1)

# Add sigma gradient to dl
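With BaseLikelihood now deriving from BaseCost, a new likelihood is written like any other cost: implement _evaluate (and optionally _evaluateS1) rather than __call__. A hedged sketch of a custom Gaussian-style likelihood, assuming a prepared FittingProblem named `problem`; the class itself is illustrative, and GaussianLogLikelihoodKnownSigma above is the real implementation of this idea:

import numpy as np
import pybop

class ToyGaussianLikelihood(pybop.BaseLikelihood):
    """Illustrative subclass following the interface shown in the diff above."""

    def __init__(self, problem, sigma):
        super().__init__(problem, sigma=sigma)
        self._offset = -0.5 * self._n_times * np.log(2 * np.pi * sigma**2)

    def _evaluate(self, x, grad=None):
        # Return the raw log-likelihood; BaseCost.evaluate() flips the sign
        # when the optimiser treats this cost as something to maximise.
        e = self._target - self.problem.evaluate(x)
        return np.sum(self._offset - np.sum(e**2, axis=0) / (2.0 * self.sigma0**2))

# likelihood = ToyGaussianLikelihood(problem, sigma=0.01)
# optim = pybop.Optimisation(likelihood)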
28 changes: 21 additions & 7 deletions pybop/costs/base_cost.py
@@ -1,5 +1,4 @@
from pybop import BaseProblem
from pybop import BaseLikelihood


class BaseCost:
@@ -26,7 +25,7 @@
Initial standard deviation around ``x0``. Either a scalar value (one
standard deviation for all coordinates) or an array with one entry
per dimension. Not all methods will use this information.
n_parameters : int
_n_parameters : int
The number of parameters in the model.
n_outputs : int
The number of outputs in the model.
@@ -37,19 +36,28 @@
self.x0 = None
self.bounds = None
self.sigma0 = None
self._minimising = True
if isinstance(self.problem, BaseProblem):
self._target = problem._target
self.x0 = problem.x0
self.bounds = problem.bounds
self.sigma0 = problem.sigma0
self.n_parameters = problem.n_parameters
self._n_parameters = problem.n_parameters
self.n_outputs = problem.n_outputs
elif isinstance(self.problem, BaseLikelihood):
self.log_likelihood = problem

@property
def n_parameters(self):
return self._n_parameters

def __call__(self, x, grad=None):
"""
Call the evaluate function for a given set of parameters.
"""
return self.evaluate(x, grad)

def evaluate(self, x, grad=None):
"""
Call the evaluate function for a given set of parameters.

Parameters
----------
Expand All @@ -70,7 +78,10 @@
If an error occurs during the calculation of the cost.
"""
try:
return self._evaluate(x, grad)
if self._minimising:
return self._evaluate(x, grad)
else: # minimise the negative cost
return -self._evaluate(x, grad)

Codecov warning: added line pybop/costs/base_cost.py#L84 was not covered by tests.

except NotImplementedError as e:
raise e
@@ -125,7 +136,10 @@
If an error occurs during the calculation of the cost or gradient.
"""
try:
return self._evaluateS1(x)
if self._minimising:
return self._evaluateS1(x)
else: # minimise the negative cost
return -self._evaluateS1(x)

Codecov warning: added line pybop/costs/base_cost.py#L142 was not covered by tests.

except NotImplementedError as e:
raise e
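The `_minimising` flag added here is what replaces the deleted ProbabilityCost wrapper: when the flag is cleared, evaluate and evaluateS1 hand the optimiser the negated value of _evaluate or _evaluateS1. A self-contained toy reproduction of that sign handling (not pybop code, purely illustrative):

class ToyCost:
    """Minimal stand-in for the sign handling that BaseCost gains in this PR."""

    def __init__(self):
        self._minimising = True  # default: _evaluate already returns a cost

    def __call__(self, x, grad=None):
        return self.evaluate(x, grad)

    def evaluate(self, x, grad=None):
        # Negate when maximising, so a plain minimiser can be used throughout.
        value = self._evaluate(x, grad)
        return value if self._minimising else -value

    def _evaluate(self, x, grad=None):
        # Pretend this is a log-likelihood: larger is better, peak at x = 3.
        return -((x - 3.0) ** 2)

cost = ToyCost()
cost._minimising = False  # what Optimisation now does for BaseLikelihood costs
print(cost(3.0))  # -> 0.0, the negated surface has its minimum at the optimum
print(cost(5.0))  # -> 4.0, grows as we move away, so a minimiser behaves correctly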
63 changes: 4 additions & 59 deletions pybop/costs/fitting_costs.py
@@ -70,13 +70,13 @@
y, dy = self.problem.evaluateS1(x)
if len(y) < len(self._target):
e = np.float64(np.inf)
de = self._de * np.ones(self.n_parameters)
de = self._de * np.ones(self._n_parameters)

Codecov warning: added line pybop/costs/fitting_costs.py#L73 was not covered by tests.
else:
dy = dy.reshape(
(
self.problem.n_time_data,
self.n_outputs,
self.n_parameters,
self._n_parameters,
)
)
r = y - self._target
@@ -177,13 +177,13 @@
y, dy = self.problem.evaluateS1(x)
if len(y) < len(self._target):
e = np.float64(np.inf)
de = self._de * np.ones(self.n_parameters)
de = self._de * np.ones(self._n_parameters)

Codecov warning: added line pybop/costs/fitting_costs.py#L180 was not covered by tests.
else:
dy = dy.reshape(
(
self.problem.n_time_data,
self.n_outputs,
self.n_parameters,
self._n_parameters,
)
)
r = y - self._target
@@ -208,61 +208,6 @@
self._de = de


class ProbabilityCost(BaseCost):
"""
Probability based cost function.

Changes the sign of the log likelihood to make it a cost function.

Inherits all parameters and attributes from ``BaseCost``.
"""

def __init__(self, log_likelihood):
super(ProbabilityCost, self).__init__(log_likelihood)

def _evaluate(self, x, grad=None):
"""
Calculate the probability based cost for a given set of parameters.

Parameters
----------
x : array-like
The parameters for which to evaluate the cost.
grad : array-like, optional
An array to store the gradient of the cost function with respect
to the parameters.

Returns
-------
float
The probability based cost.
"""
return -self.log_likelihood(x)

def _evaluateS1(self, x):
"""
Compute the cost and its gradient with respect to the parameters.

Parameters
----------
x : array-like
The parameters for which to compute the cost and gradient.

Returns
-------
tuple
A tuple containing the cost and the gradient. The cost is a float,
and the gradient is an array-like of the same length as `x`.

Raises
------
ValueError
If an error occurs during the calculation of the cost or gradient.
"""
likelihood, dl = self.log_likelihood._evaluateS1(x)
return -likelihood, -dl


class ObserverCost(BaseCost):
"""
Observer cost function.
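For downstream scripts, deleting ProbabilityCost means the former wrapper step simply disappears: the likelihood is passed to the optimiser directly and the sign handling in base_cost.py takes care of maximisation. A rough migration sketch, assuming an existing FittingProblem named `problem` (the commented-out lines reflect the pre-PR API):

import pybop

likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=0.01)

# Before this PR: wrap the likelihood so the optimiser saw a minimisable cost.
# cost = pybop.ProbabilityCost(likelihood)
# optim = pybop.Optimisation(cost)

# After this PR: the likelihood is itself a cost and is negated internally.
optim = pybop.Optimisation(likelihood)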
6 changes: 1 addition & 5 deletions tests/unit/test_cost.py
@@ -70,16 +70,12 @@ def problem(self, model, parameters, dataset, signal, x0, request):
pybop.RootMeanSquaredError,
pybop.SumSquaredError,
pybop.ObserverCost,
pybop.ProbabilityCost,
]
)
def cost(self, problem, request):
cls = request.param
if cls in [pybop.SumSquaredError, pybop.RootMeanSquaredError]:
return cls(problem)
elif cls in [pybop.ProbabilityCost]:
likelihood = pybop.GaussianLogLikelihoodKnownSigma(problem, sigma=0.01)
return cls(likelihood)
elif cls in [pybop.ObserverCost]:
inputs = {p.name: problem.x0[i] for i, p in enumerate(problem.parameters)}
state = problem._model.reinit(inputs)
@@ -144,7 +140,7 @@ def test_costs(self, cost):
# Test option setting
cost.set_fail_gradient(1)

if isinstance(cost, (pybop.SumSquaredError, pybop.ProbabilityCost)):
if isinstance(cost, pybop.SumSquaredError):
e, de = cost.evaluateS1([0.5])

assert type(e) == np.float64
6 changes: 0 additions & 6 deletions tests/unit/test_likelihoods.py
@@ -75,12 +75,6 @@ def test_base_likelihood_init(self, problem):
assert likelihood._n_parameters == 1
assert np.array_equal(likelihood._target, problem._target)

@pytest.mark.unit
Review comment (Member): I think we still want this test? Looking at the class above, calling a bare BaseLikelihood still raises NotImplementedError, so it would be good to keep a test on that.

def test_base_likelihood_call_raises_not_implemented_error(self, problem):
likelihood = pybop.BaseLikelihood(problem)
with pytest.raises(NotImplementedError):
likelihood(np.array([0.5, 0.5]))

@pytest.mark.unit
def test_base_likelihood_set_get_sigma(self, problem):
likelihood = pybop.BaseLikelihood(problem)