Merge pull request #413 from pybop-team/327b-weighted-cost
Additions for #327 - Enable `DesignCosts` in `WeightedCost`
BradyPlanden committed Aug 1, 2024
2 parents 66cf05f + 1bedcd7 commit 5b163ae
Showing 15 changed files with 498 additions and 281 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -2,6 +2,7 @@

 ## Features

+- [#413](https://github.com/pybop-team/PyBOP/pull/413) - Adds `DesignCost` functionality to the `WeightedCost` class with additional tests.
 - [#357](https://github.com/pybop-team/PyBOP/pull/357) - Adds the `Transformation()` class with `LogTransformation()`, `IdentityTransformation()`, `ScaledTransformation()` and `ComposedTransformation()` implementations, with corresponding examples and tests.
 - [#427](https://github.com/pybop-team/PyBOP/issues/427) - Adds the nbstripout pre-commit hook to remove unnecessary metadata from notebooks.
 - [#327](https://github.com/pybop-team/PyBOP/issues/327) - Adds the `WeightedCost` subclass, defines when to evaluate a problem and adds the `spm_weighted_cost` example script.
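For orientation before the diffs below: the presumed semantics of `WeightedCost` is a weighted sum of its component costs, all evaluated at the same parameter values. A minimal standalone sketch of that idea (an illustration only, not PyBOP's implementation):

```python
# Sketch only: a weighted cost sums weights[i] * costs[i](x) over its components.
def weighted_cost(costs, weights, x):
    return sum(w * cost(x) for cost, w in zip(costs, weights, strict=True))


# Toy usage with two hypothetical component costs:
def sse(x):
    return sum((xi - 1.0) ** 2 for xi in x)  # toy sum of squared errors


def rmse(x):
    return (sse(x) / len(x)) ** 0.5  # toy root-mean-squared error


print(weighted_cost([sse, rmse], [0.1, 1.0], [0.9, 1.1]))
```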
27 changes: 20 additions & 7 deletions examples/scripts/spm_weighted_cost.py
@@ -24,24 +24,37 @@

 # Generate data
 sigma = 0.001
-t_eval = np.arange(0, 900, 3)
-values = model.predict(t_eval=t_eval)
-corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))
+init_soc = 0.5
+experiment = pybop.Experiment(
+    [
+        (
+            "Discharge at 0.5C for 3 minutes (3 second period)",
+            "Charge at 0.5C for 3 minutes (3 second period)",
+        ),
+    ]
+    * 2
+)
+values = model.predict(experiment=experiment, init_soc=init_soc)
+
+
+def noise(sigma):
+    return np.random.normal(0, sigma, len(values["Voltage [V]"].data))
+
+
 # Form dataset
 dataset = pybop.Dataset(
     {
-        "Time [s]": t_eval,
+        "Time [s]": values["Time [s]"].data,
         "Current function [A]": values["Current [A]"].data,
-        "Voltage [V]": corrupt_values,
+        "Voltage [V]": values["Voltage [V]"].data + noise(sigma),
     }
 )

 # Generate problem, cost function, and optimisation class
-problem = pybop.FittingProblem(model, parameters, dataset)
+problem = pybop.FittingProblem(model, parameters, dataset, init_soc=init_soc)
 cost1 = pybop.SumSquaredError(problem)
 cost2 = pybop.RootMeanSquaredError(problem)
-weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[1, 100])
+weighted_cost = pybop.WeightedCost(cost1, cost2, weights=[0.1, 1])

 for cost in [weighted_cost, cost1, cost2]:
     optim = pybop.IRPropMin(cost, max_iterations=60)
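A small aside on the corruption step above: for reproducible runs, the noise could be drawn from a seeded generator instead. A sketch assuming NumPy's `Generator` API (the seed is arbitrary, and this variant of `noise` takes the sample count explicitly):

```python
import numpy as np

rng = np.random.default_rng(seed=8)


def noise(sigma, n):
    # Reproducible zero-mean Gaussian noise for corrupting the voltage data
    return rng.normal(0, sigma, n)
```

which would be called as `noise(sigma, len(values["Voltage [V]"].data))` when building the dataset.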
22 changes: 10 additions & 12 deletions examples/scripts/spme_max_energy.py
@@ -9,12 +9,6 @@
 # electrode widths, particle radii, volume fractions and
 # separator width.

-# NOTE: This script can be easily adjusted to consider the volumetric
-# (instead of gravimetric) energy density by changing the line which
-# defines the cost and changing the output to:
-# print(f"Initial volumetric energy density: {cost(optim.x0):.2f} Wh.m-3")
-# print(f"Optimised volumetric energy density: {final_cost:.2f} Wh.m-3")
-
 # Define parameter set and model
 parameter_set = pybop.ParameterSet.pybamm("Chen2020", formation_concentrations=True)
 model = pybop.lithium_ion.SPMe(parameter_set=parameter_set)
@@ -45,17 +39,21 @@
     model, parameters, experiment, signal=signal, init_soc=init_soc
 )

-# Generate cost function and optimisation class:
-cost = pybop.GravimetricEnergyDensity(problem)
+# Generate multiple cost functions and combine them.
+cost1 = pybop.GravimetricEnergyDensity(problem, update_capacity=True)
+cost2 = pybop.VolumetricEnergyDensity(problem, update_capacity=True)
+cost = pybop.WeightedCost(cost1, cost2, weights=[1, 1])

+# Run optimisation
 optim = pybop.PSO(
     cost, verbose=True, allow_infeasible_solutions=False, max_iterations=15
 )

-# Run optimisation
 x, final_cost = optim.run()
 print("Estimated parameters:", x)
-print(f"Initial gravimetric energy density: {cost(optim.x0):.2f} Wh.kg-1")
-print(f"Optimised gravimetric energy density: {final_cost:.2f} Wh.kg-1")
+print(f"Initial gravimetric energy density: {cost1(optim.x0):.2f} Wh.kg-1")
+print(f"Optimised gravimetric energy density: {cost1(x):.2f} Wh.kg-1")
+print(f"Initial volumetric energy density: {cost2(optim.x0):.2f} Wh.m-3")
+print(f"Optimised volumetric energy density: {cost2(x):.2f} Wh.m-3")

 # Plot the timeseries output
 if cost.update_capacity:
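With both objectives inside one `WeightedCost`, the gravimetric/volumetric trade-off that the removed NOTE described becomes a matter of weighting rather than code changes. An illustrative tweak (assuming unequal float weights are accepted, as in the `spm_weighted_cost` example above):

```python
# Illustrative only: bias the design optimisation toward volumetric
# energy density by down-weighting the gravimetric term.
cost = pybop.WeightedCost(cost1, cost2, weights=[0.2, 1.0])
```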
2 changes: 1 addition & 1 deletion examples/standalone/problem.py
@@ -42,7 +42,7 @@ def __init__(
         )
         self._target = {signal: self._dataset[signal] for signal in self.signal}

-    def evaluate(self, inputs):
+    def evaluate(self, inputs, **kwargs):
         """
         Evaluate the model with the given parameters and return the signal.
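The widened signature keeps standalone problems compatible with callers that pass evaluation options (for example, the capacity updates used by the design costs above) while letting the problem ignore options it does not support. A minimal sketch with hypothetical attribute names:

```python
def evaluate(self, inputs, **kwargs):
    # Extra keyword arguments (e.g. update_capacity=True) are accepted
    # and ignored here; problems that support them can act on them.
    sol = self._model.simulate(inputs=inputs, t_eval=self._time_data)
    return {signal: sol[signal].data for signal in self.signal}
```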
62 changes: 22 additions & 40 deletions pybop/costs/_likelihoods.py
@@ -43,17 +43,15 @@ def _evaluate(self, inputs: Inputs) -> float:
         """
         Evaluates the Gaussian log-likelihood for the given parameters with known sigma.
         """
-        if not self.verify_prediction(self._current_prediction):
+        if not self.verify_prediction(self.y):
             return -np.inf

         e = np.asarray(
             [
                 np.sum(
                     self._offset
                     + self._multip
-                    * np.sum(
-                        (self._target[signal] - self._current_prediction[signal]) ** 2.0
-                    )
+                    * np.sum((self._target[signal] - self.y[signal]) ** 2.0)
                 )
                 for signal in self.signal
             ]
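Aside: the constants in this hunk are consistent with the standard Gaussian log-likelihood with known noise, assuming `self._offset` $= -\tfrac{n}{2}\log(2\pi\sigma^2)$ and `self._multip` $= -\tfrac{1}{2\sigma^2}$, where $n$ is the number of time samples:

$$\log L(\theta) = -\frac{n}{2}\log\left(2\pi\sigma^2\right) - \frac{1}{2\sigma^2}\sum_{i=1}^{n}\left(y_i^{\mathrm{target}} - y_i(\theta)\right)^2$$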
@@ -65,20 +63,15 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
         """
         Calculates the log-likelihood and gradient.
         """
-        if not self.verify_prediction(self._current_prediction):
+        if not self.verify_prediction(self.y):
             return -np.inf, -self._de * np.ones(self.n_parameters)

         likelihood = self._evaluate(inputs)

         r = np.asarray(
-            [
-                self._target[signal] - self._current_prediction[signal]
-                for signal in self.signal
-            ]
-        )
-        dl = np.sum(
-            (np.sum((r * self._current_sensitivities.T), axis=2) / self.sigma2), axis=1
+            [self._target[signal] - self.y[signal] for signal in self.signal]
         )
+        dl = np.sum((np.sum((r * self.dy.T), axis=2) / self.sigma2), axis=1)

         return likelihood, dl

@@ -123,7 +116,7 @@ def __init__(
         super().__init__(problem)
         self._dsigma_scale = dsigma_scale
         self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi)
-        self._fixed_problem = False  # keep problem evaluation within _evaluate
+        self._has_separable_problem = False

         self.sigma = Parameters()
         self._add_sigma_parameters(sigma0)
@@ -196,20 +189,16 @@ def _evaluate(self, inputs: Inputs) -> float:
         if np.any(sigma <= 0):
             return -np.inf

-        self._current_prediction = self.problem.evaluate(
-            self.problem.parameters.as_dict()
-        )
-        if not self.verify_prediction(self._current_prediction):
+        self.y = self.problem.evaluate(self.problem.parameters.as_dict())
+        if not self.verify_prediction(self.y):
             return -np.inf

         e = np.asarray(
             [
                 np.sum(
                     self._logpi
                     - self.n_time_data * np.log(sigma)
-                    - np.sum(
-                        (self._target[signal] - self._current_prediction[signal]) ** 2.0
-                    )
+                    - np.sum((self._target[signal] - self.y[signal]) ** 2.0)
                     / (2.0 * sigma**2.0)
                 )
                 for signal in self.signal
@@ -238,23 +227,16 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
         if np.any(sigma <= 0):
             return -np.inf, -self._de * np.ones(self.n_parameters)

-        self._current_prediction, self._current_sensitivities = self.problem.evaluateS1(
-            self.problem.parameters.as_dict()
-        )
-        if not self.verify_prediction(self._current_prediction):
+        self.y, self.dy = self.problem.evaluateS1(self.problem.parameters.as_dict())
+        if not self.verify_prediction(self.y):
             return -np.inf, -self._de * np.ones(self.n_parameters)

         likelihood = self._evaluate(inputs)

         r = np.asarray(
-            [
-                self._target[signal] - self._current_prediction[signal]
-                for signal in self.signal
-            ]
-        )
-        dl = np.sum(
-            (np.sum((r * self._current_sensitivities.T), axis=2) / (sigma**2.0)), axis=1
+            [self._target[signal] - self.y[signal] for signal in self.signal]
         )
+        dl = np.sum((np.sum((r * self.dy.T), axis=2) / (sigma**2.0)), axis=1)
         dsigma = (
             -self.n_time_data / sigma + np.sum(r**2.0, axis=1) / (sigma**3.0)
         ) / self._dsigma_scale
@@ -296,6 +278,9 @@ def __init__(self, problem, likelihood, sigma0=None, gradient_step=1e-3):
         ):
             raise ValueError(f"{self.likelihood} must be a subclass of BaseLikelihood")

+        self.parameters = self.likelihood.parameters
+        self._has_separable_problem = self.likelihood._has_separable_problem
+
     def _evaluate(self, inputs: Inputs) -> float:
         """
         Calculate the maximum a posteriori cost for a given set of parameters.
@@ -317,9 +302,9 @@ def _evaluate(self, inputs: Inputs) -> float:
         if not np.isfinite(log_prior).any():
             return -np.inf

-        if self._fixed_problem:
-            self.likelihood._current_prediction = self._current_prediction
-        log_likelihood = self.likelihood._evaluate(inputs)
+        if self._has_separable_problem:
+            self.likelihood.y = self.y
+        log_likelihood = self.likelihood.evaluate(inputs)

         posterior = log_likelihood + log_prior
         return posterior
@@ -351,12 +336,9 @@ def _evaluateS1(self, inputs: Inputs) -> tuple[float, np.ndarray]:
         if not np.isfinite(log_prior).any():
             return -np.inf, -self._de * np.ones(self.n_parameters)

-        if self._fixed_problem:
-            (
-                self.likelihood._current_prediction,
-                self.likelihood._current_sensitivities,
-            ) = self._current_prediction, self._current_sensitivities
-        log_likelihood, dl = self.likelihood._evaluateS1(inputs)
+        if self._has_separable_problem:
+            self.likelihood.y, self.likelihood.dy = (self.y, self.dy)
+        log_likelihood, dl = self.likelihood.evaluateS1(inputs)

         # Compute a finite difference approximation of the gradient of the log prior
         delta = self.parameters.initial_value() * self.gradient_step
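Taken together, these changes rename the cached prediction and sensitivities to `self.y` and `self.dy` and replace the `_fixed_problem` flag with `_has_separable_problem`. A rough sketch of the presumed evaluation flow for costs that share a separable problem (illustrative, not PyBOP's actual base-class code):

```python
# Sketch: a composite cost evaluates the shared problem once and hands
# the prediction (y) and sensitivities (dy) to each component cost.
# Costs that also fit sigma (e.g. GaussianLogLikelihood) set
# _has_separable_problem = False and evaluate the problem themselves.
def evaluate_components(problem, costs, inputs):
    y, dy = problem.evaluateS1(inputs)  # single shared model evaluation
    results = []
    for cost in costs:
        if cost._has_separable_problem:
            cost.y, cost.dy = y, dy  # reuse the shared prediction
        results.append(cost.evaluateS1(inputs))
    return results
```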