\n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "
\n",
- " "
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
+ "image/svg+xml": [
+ "
"
]
},
"metadata": {},
@@ -437,61 +384,8 @@
"outputs": [
{
"data": {
- "text/html": [
- " \n",
- " "
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
+ "image/svg+xml": [
+ "
"
]
},
"metadata": {},
@@ -499,61 +393,8 @@
},
{
"data": {
- "text/html": [
- " \n",
- " "
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
+ "image/svg+xml": [
+ "
"
]
},
"metadata": {},
@@ -581,90 +422,8 @@
"outputs": [
{
"data": {
- "text/html": [
- " \n",
- " "
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- " \n",
- " "
+ "image/svg+xml": [
+ "
"
]
},
"metadata": {},
@@ -672,32 +431,8 @@
},
{
"data": {
- "text/html": [
- "
"
+ "image/svg+xml": [
+ "
"
]
},
"metadata": {},
From d7a71a75b24dd3c245a13b2543be720a241d5ccc Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Thu, 22 Feb 2024 09:29:43 +0000
Subject: [PATCH 28/64] fix missed deletion during merge
---
setup.py | 51 ---------------------------------------------------
1 file changed, 51 deletions(-)
delete mode 100644 setup.py
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 8d53b6e9..00000000
--- a/setup.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from distutils.core import setup
-import os
-from setuptools import find_packages
-
-# User-friendly description from README.md
-current_directory = os.path.dirname(os.path.abspath(__file__))
-try:
- with open(os.path.join(current_directory, "README.md"), encoding="utf-8") as f:
- long_description = f.read()
-except Exception:
- long_description = ""
-
-# Defines __version__
-root = os.path.abspath(os.path.dirname(__file__))
-with open(os.path.join(root, "pybop", "version.py")) as f:
- exec(f.read())
-
-setup(
- name="pybop",
- packages=find_packages("."),
- version=__version__, # noqa F821
- license="BSD-3-Clause",
- description="Python Battery Optimisation and Parameterisation",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/pybop-team/PyBOP",
- install_requires=[
- "pybamm>=23.5",
- "numpy>=1.16",
- "scipy>=1.3",
- "pandas>=1.0",
- "pints>=0.5",
- ],
- extras_require={
- "plot": ["plotly>=5.0", "kaleido>=0.2"],
- "all": ["pybop[plot]"],
- "docs": [
- "sphinx>=6",
- "pydata-sphinx-theme",
- "sphinx-autobuild",
- "sphinx-autoapi",
- "sphinx_copybutton",
- "sphinx_favicon",
- "sphinx_design",
- "myst-parser",
- ],
- },
- # https://pypi.org/classifiers/
- classifiers=[],
- python_requires=">=3.8,<=3.12",
-)
From 183df28aaf283c85179916bd82bded32bc1a92b2 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Fri, 23 Feb 2024 14:35:44 +0000
Subject: [PATCH 29/64] Update test_plots.py
---
tests/unit/test_plots.py | 31 +++++++++++++++----------------
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 11e935f0..fff8074b 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -13,15 +13,9 @@ def model(self):
# Define an example model
return pybop.lithium_ion.SPM()
- @pytest.mark.unit
- def test_model_plots(self):
- # Test plotting of Model objects
- pass
-
@pytest.fixture
- def problem(self, model):
- # Define an example problem
- parameters = [
+ def parameters(self):
+ return [
pybop.Parameter(
"Negative particle radius [m]",
prior=pybop.Gaussian(6e-06, 0.1e-6),
@@ -34,12 +28,14 @@ def problem(self, model):
),
]
+ @pytest.fixture
+ def dataset(self, model):
# Generate data
t_eval = np.arange(0, 50, 2)
values = model.predict(t_eval=t_eval)
# Form dataset
- dataset = pybop.Dataset(
+ return pybop.Dataset(
{
"Time [s]": t_eval,
"Current function [A]": values["Current [A]"].data,
@@ -47,13 +43,19 @@ def problem(self, model):
}
)
- # Generate problem
+ @pytest.mark.unit
+ def test_dataset_plots(self, dataset):
+ # Test plotting of Dataset objects
+ pybop.plot_dataset(dataset, signal=["Voltage [V]"])
+
+ @pytest.fixture
+ def problem(self, model, parameters, dataset):
return pybop.FittingProblem(model, parameters, dataset)
@pytest.mark.unit
- def test_problem_plots(self):
+ def test_problem_plots(self, problem):
# Test plotting of Problem objects
- pass
+ pybop.quick_plot(problem, title="Optimised Comparison")
@pytest.fixture
def cost(self, problem):
@@ -63,9 +65,6 @@ def cost(self, problem):
@pytest.mark.unit
def test_cost_plots(self, cost):
# Test plotting of Cost objects
- pybop.quick_plot(cost.x0, cost, title="Optimised Comparison")
-
- # Plot the cost landscape
pybop.plot_cost2d(cost, steps=5)
@pytest.fixture
@@ -84,4 +83,4 @@ def test_optim_plots(self, optim):
pybop.plot_parameters(optim)
# Plot the cost landscape with optimisation path
- pybop.plot_cost2d(optim.cost, optim=optim, steps=5)
+ pybop.plot_optim2d(optim, steps=5)
From 4cf9108c85a11caf38f7efc94f5adee57d292e34 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 1 Mar 2024 12:30:47 +0000
Subject: [PATCH 30/64] Revamp model, problem, and cost object from numpy
arrays to dictionary. Update tests, add base model classes to init, cleaner
multi-signal interaction
---
examples/scripts/spm_CMAES.py | 4 +-
examples/scripts/spm_adam.py | 17 ++-
examples/scripts/spm_scipymin.py | 4 +-
examples/standalone/problem.py | 16 +--
pybop/_problem.py | 39 ++++--
pybop/costs/design_costs.py | 14 ++-
pybop/costs/fitting_costs.py | 127 ++++++++++++++------
pybop/models/base_model.py | 31 +++--
pybop/models/empirical/__init__.py | 2 +-
pybop/models/lithium_ion/__init__.py | 2 +-
pybop/models/lithium_ion/echem_base.py | 11 +-
pybop/plotting/plot_problem.py | 24 ++--
tests/integration/test_parameterisations.py | 8 +-
tests/unit/test_cost.py | 6 +-
tests/unit/test_models.py | 4 +-
tests/unit/test_problem.py | 4 +-
tests/unit/test_standalone.py | 11 +-
17 files changed, 212 insertions(+), 112 deletions(-)
diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py
index 170d560b..816f6798 100644
--- a/examples/scripts/spm_CMAES.py
+++ b/examples/scripts/spm_CMAES.py
@@ -33,11 +33,13 @@
"Time [s]": t_eval,
"Current function [A]": values["Current [A]"].data,
"Voltage [V]": corrupt_values,
+ "Bulk open-circuit voltage [V]": values["Bulk open-circuit voltage [V]"].data,
}
)
+signal = ["Voltage [V]", "Bulk open-circuit voltage [V]"]
# Generate problem, cost function, and optimisation class
-problem = pybop.FittingProblem(model, parameters, dataset)
+problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
cost = pybop.SumSquaredError(problem)
optim = pybop.Optimisation(cost, optimiser=pybop.CMAES)
optim.set_max_iterations(100)
diff --git a/examples/scripts/spm_adam.py b/examples/scripts/spm_adam.py
index b8f3d2f5..56d315f4 100644
--- a/examples/scripts/spm_adam.py
+++ b/examples/scripts/spm_adam.py
@@ -20,7 +20,7 @@
]
# Generate data
-sigma = 0.001
+sigma = 0.01
t_eval = np.arange(0, 900, 2)
values = model.predict(t_eval=t_eval)
corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))
@@ -31,14 +31,23 @@
"Time [s]": t_eval,
"Current function [A]": values["Current [A]"].data,
"Voltage [V]": corrupt_values,
+ "Bulk open-circuit voltage [V]": values["Bulk open-circuit voltage [V]"].data,
}
)
+signal = ["Voltage [V]", "Bulk open-circuit voltage [V]"]
# Generate problem, cost function, and optimisation class
-problem = pybop.FittingProblem(model, parameters, dataset)
-cost = pybop.SumSquaredError(problem)
-optim = pybop.Optimisation(cost, optimiser=pybop.Adam, verbose=True)
+problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
+cost = pybop.RootMeanSquaredError(problem)
+optim = pybop.Optimisation(
+ cost,
+ optimiser=pybop.Adam,
+ verbose=True,
+ allow_infeasible_solutions=True,
+ sigma0=sigma,
+)
optim.set_max_iterations(100)
+optim.set_max_unchanged_iterations(20)
# Run optimisation
x, final_cost = optim.run()
diff --git a/examples/scripts/spm_scipymin.py b/examples/scripts/spm_scipymin.py
index 759f8c2e..db1d783d 100644
--- a/examples/scripts/spm_scipymin.py
+++ b/examples/scripts/spm_scipymin.py
@@ -21,12 +21,12 @@
parameters = [
pybop.Parameter(
"Negative electrode active material volume fraction",
- prior=pybop.Gaussian(0.6, 0.05),
+ prior=pybop.Gaussian(0.6, 0.02),
bounds=[0.5, 0.8],
),
pybop.Parameter(
"Positive electrode active material volume fraction",
- prior=pybop.Gaussian(0.48, 0.05),
+ prior=pybop.Gaussian(0.48, 0.02),
bounds=[0.4, 0.7],
),
]
diff --git a/examples/standalone/problem.py b/examples/standalone/problem.py
index 5a29138e..bc9cd31d 100644
--- a/examples/standalone/problem.py
+++ b/examples/standalone/problem.py
@@ -14,10 +14,13 @@ def __init__(
model=None,
check_model=True,
signal=None,
+ default_variables=None,
init_soc=None,
x0=None,
):
- super().__init__(parameters, model, check_model, signal, init_soc, x0)
+ super().__init__(
+ parameters, model, check_model, signal, default_variables, init_soc, x0
+ )
self._dataset = dataset.data
# Check that the dataset contains time and current
@@ -37,8 +40,7 @@ def __init__(
raise ValueError(
f"Time data and {signal} data must be the same length."
)
- target = [self._dataset[signal] for signal in self.signal]
- self._target = np.vstack(target).T
+ self._target = {signal: self._dataset[signal] for signal in self.signal}
def evaluate(self, x):
"""
@@ -55,7 +57,7 @@ def evaluate(self, x):
The model output y(t) simulated with inputs x.
"""
- return x[0] * self._time_data + x[1]
+ return {signal: x[0] * self._time_data + x[1] for signal in self.signal}
def evaluateS1(self, x):
"""
@@ -73,8 +75,8 @@ def evaluateS1(self, x):
with given inputs x.
"""
- y = x[0] * self._time_data + x[1]
+ y = {signal: x[0] * self._time_data + x[1] for signal in self.signal}
- dy = np.dstack([self._time_data, np.zeros(self._time_data.shape)])
+ dy = [self._time_data, np.zeros(self._time_data.shape)]
- return (np.asarray(y), np.asarray(dy))
+ return (y, np.asarray(dy))
diff --git a/pybop/_problem.py b/pybop/_problem.py
index 2ecc5951..e2aaee3d 100644
--- a/pybop/_problem.py
+++ b/pybop/_problem.py
@@ -1,4 +1,5 @@
import numpy as np
+import pybop
class BaseProblem:
@@ -27,6 +28,7 @@ def __init__(
model=None,
check_model=True,
signal=["Voltage [V]"],
+ default_variables=[],
init_soc=None,
x0=None,
):
@@ -45,6 +47,11 @@ def __init__(
self._time_data = None
self._target = None
+ if isinstance(model, (pybop.BaseModel, pybop.lithium_ion.EChemBaseModel)):
+ self.default_variables = default_variables
+ else:
+ self.default_variables = []
+
# Set bounds
self.bounds = dict(
lower=[param.bounds[0] for param in self.parameters],
@@ -148,10 +155,13 @@ def __init__(
dataset,
check_model=True,
signal=["Voltage [V]"],
+ default_variables=["Time [s]", "Discharge capacity [A.h]"],
init_soc=None,
x0=None,
):
- super().__init__(parameters, model, check_model, signal, init_soc, x0)
+ super().__init__(
+ parameters, model, check_model, signal, default_variables, init_soc, x0
+ )
self._dataset = dataset.data
self.x = self.x0
@@ -161,12 +171,13 @@ def __init__(
# Unpack time and target data
self._time_data = self._dataset["Time [s]"]
self.n_time_data = len(self._time_data)
- target = [self._dataset[signal] for signal in self.signal]
- self._target = np.vstack(target).T
+ self._target = {signal: self._dataset[signal] for signal in self.signal}
# Add useful parameters to model
if model is not None:
self._model.signal = self.signal
+ self._model.default_variables = self.default_variables
+ self._model.n_parameters = self.n_parameters
self._model.n_outputs = self.n_outputs
self._model.n_time_data = self.n_time_data
@@ -193,14 +204,14 @@ def evaluate(self, x):
y : np.ndarray
The model output y(t) simulated with inputs x.
"""
- if (x != self.x).any() and self._model.matched_parameters:
+ if np.any(x != self.x) and self._model.matched_parameters:
for i, param in enumerate(self.parameters):
param.update(value=x[i])
self._model.rebuild(parameters=self.parameters)
self.x = x
- y = np.asarray(self._model.simulate(inputs=x, t_eval=self._time_data))
+ y = self._model.simulate(inputs=x, t_eval=self._time_data)
return y
@@ -229,7 +240,7 @@ def evaluateS1(self, x):
t_eval=self._time_data,
)
- return (np.asarray(y), np.asarray(dy))
+ return (y, np.asarray(dy))
class DesignProblem(BaseProblem):
@@ -255,10 +266,13 @@ def __init__(
experiment,
check_model=True,
signal=["Voltage [V]"],
+ default_variables=["Time [s]", "Current [A]", "Discharge capacity [A.h]"],
init_soc=None,
x0=None,
):
- super().__init__(parameters, model, check_model, signal, init_soc, x0)
+ super().__init__(
+ parameters, model, check_model, signal, default_variables, init_soc, x0
+ )
self.experiment = experiment
# Build the model if required
@@ -278,8 +292,9 @@ def __init__(
# Add an example dataset for plotting comparison
sol = self.evaluate(self.x0)
- self._time_data = sol[:, -1]
- self._target = sol[:, 0:-1]
+ self._time_data = sol["Time [s]"]
+ self._capacity_data = sol["Discharge capacity [A.h]"]
+ self._target = {key: sol[key] for key in self.signal}
self._dataset = None
def evaluate(self, x):
@@ -307,6 +322,8 @@ def evaluate(self, x):
return sol
else:
- predictions = [sol[signal].data for signal in self.signal + ["Time [s]"]]
+ predictions = {}
+ for signal in self.signal + self.default_variables:
+ predictions[signal] = sol[signal].data
- return np.vstack(predictions).T
+ return predictions
diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py
index 5af2ddfe..f2b452af 100644
--- a/pybop/costs/design_costs.py
+++ b/pybop/costs/design_costs.py
@@ -57,9 +57,13 @@ def update_simulation_data(self, initial_conditions):
if self.update_capacity:
self.problem.model.approximate_capacity(self.problem.x0)
solution = self.problem.evaluate(initial_conditions)
- self.problem._time_data = solution[:, -1]
- self.problem._target = solution[:, 0:-1]
- self.dt = solution[1, -1] - solution[0, -1]
+
+ if "Time [s]" not in solution:
+ raise ValueError("The solution does not contain time data.")
+ self.problem._time_data = solution["Time [s]"]
+ self.problem._capacity_data = solution["Discharge capacity [A.h]"]
+ self.problem._target = {key: solution[key] for key in self.problem.signal}
+ self.dt = solution["Time [s]"][1] - solution["Time [s]"][0]
def _evaluate(self, x, grad=None):
"""
@@ -123,7 +127,7 @@ def _evaluate(self, x, grad=None):
self.problem.model.approximate_capacity(x)
solution = self.problem.evaluate(x)
- voltage, current = solution[:, 0], solution[:, 1]
+ voltage, current = solution["Voltage [V]"], solution["Current [A]"]
negative_energy_density = -np.trapz(voltage * current, dx=self.dt) / (
3600 * self.problem.model.cell_mass(self.parameter_set)
)
@@ -181,7 +185,7 @@ def _evaluate(self, x, grad=None):
self.problem.model.approximate_capacity(x)
solution = self.problem.evaluate(x)
- voltage, current = solution[:, 0], solution[:, 1]
+ voltage, current = solution["Voltage [V]"], solution["Current [A]"]
negative_energy_density = -np.trapz(voltage * current, dx=self.dt) / (
3600 * self.problem.model.cell_volume(self.parameter_set)
)
diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py
index 8db3111c..d8bb212a 100644
--- a/pybop/costs/fitting_costs.py
+++ b/pybop/costs/fitting_costs.py
@@ -39,10 +39,23 @@ def _evaluate(self, x, grad=None):
"""
prediction = self.problem.evaluate(x)
- if len(prediction) < len(self._target):
- return np.float64(np.inf) # simulation stopped early
+ for key in prediction:
+ if key not in ["Time [s]", "Discharge capacity [A.h]"]:
+ if len(prediction.get(key, [])) != len(self._target.get(key, [])):
+ return np.float64(np.inf) # prediction doesn't match target
+
+ e = np.array(
+ [
+ np.sqrt(np.mean((prediction[signal] - self._target[signal]) ** 2))
+ for signal in prediction
+ if signal not in ["Time [s]", "Discharge capacity [A.h]"]
+ ]
+ )
+
+ if self.n_outputs == 1:
+ return e.item()
else:
- return np.sqrt(np.mean((prediction - self._target) ** 2))
+ return np.sum(e)
def _evaluateS1(self, x):
"""
@@ -65,24 +78,38 @@ def _evaluateS1(self, x):
If an error occurs during the calculation of the cost or gradient.
"""
y, dy = self.problem.evaluateS1(x)
- if len(y) < len(self._target):
- e = np.float64(np.inf)
- de = self._de * np.ones(self.n_parameters)
- else:
- dy = dy.reshape(
- (
- self.problem.n_time_data,
- self.n_outputs,
- self.n_parameters,
- )
- )
- r = y - self._target
- e = np.sqrt(np.mean((r) ** 2))
- de = np.mean((r.T * dy.T), axis=2) / np.sqrt(
- np.mean((r.T * dy.T) ** 2, axis=2)
+
+ for key in y:
+ if key not in ["Time [s]", "Discharge capacity [A.h]"]:
+ if len(y.get(key, [])) != len(self._target.get(key, [])):
+ e = np.float64(np.inf)
+ de = self._de * np.ones(self.n_parameters)
+ return e, de
+
+ r = np.array(
+ [
+ y[signal] - self._target[signal]
+ for signal in y
+ if signal not in ["Time [s]", "Discharge capacity [A.h]"]
+ ]
+ )
+
+ if self.n_outputs == 1:
+ r = r.reshape(self.problem.n_time_data)
+ dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
+ e = np.sqrt(np.mean(r**2))
+ de = np.mean((r * dy), axis=1) / np.sqrt(
+ np.mean((r * dy) ** 2, axis=1) + np.finfo(float).eps
)
+ return e.item(), de.flatten()
- return e, de.flatten()
+ else:
+ r = r.reshape(self.n_outputs, self.problem.n_time_data)
+ e = np.sqrt(np.mean(r**2, axis=1))
+ de = np.mean((r[:, :, np.newaxis] * dy), axis=1) / np.sqrt(
+ np.mean((r[:, :, np.newaxis] * dy) ** 2, axis=1) + np.finfo(float).eps
+ )
+ return np.sum(e), np.sum(de, axis=1)
class SumSquaredError(BaseCost):
@@ -128,13 +155,22 @@ def _evaluate(self, x, grad=None):
"""
prediction = self.problem.evaluate(x)
- if len(prediction) < len(self._target):
- return np.float64(np.inf) # simulation stopped early
+ for key in prediction:
+ if key not in ["Time [s]", "Discharge capacity [A.h]"]:
+ if len(prediction.get(key, [])) != len(self._target.get(key, [])):
+ return np.float64(np.inf) # prediction doesn't match target
+
+ e = np.array(
+ [
+ np.sum(((prediction[signal] - self._target[signal]) ** 2), axis=0)
+ for signal in prediction
+ if signal not in ["Time [s]", "Discharge capacity [A.h]"]
+ ]
+ )
+ if self.n_outputs == 1:
+ return e.item()
else:
- return np.sum(
- (np.sum(((prediction - self._target) ** 2), axis=0)),
- axis=0,
- )
+ return np.sum(e)
def _evaluateS1(self, x):
"""
@@ -157,22 +193,33 @@ def _evaluateS1(self, x):
If an error occurs during the calculation of the cost or gradient.
"""
y, dy = self.problem.evaluateS1(x)
- if len(y) < len(self._target):
- e = np.float64(np.inf)
- de = self._de * np.ones(self.n_parameters)
- else:
- dy = dy.reshape(
- (
- self.problem.n_time_data,
- self.n_outputs,
- self.n_parameters,
- )
- )
- r = y - self._target
- e = np.sum(np.sum(r**2, axis=0), axis=0)
- de = 2 * np.sum(np.sum((r.T * dy.T), axis=2), axis=1)
+ for key in y:
+ if key not in ["Time [s]", "Discharge capacity [A.h]"]:
+ if len(y.get(key, [])) != len(self._target.get(key, [])):
+ e = np.float64(np.inf)
+ de = self._de * np.ones(self.n_parameters)
+ return e, de
+
+ r = np.array(
+ [
+ y[signal] - self._target[signal]
+ for signal in y
+ if signal not in ["Time [s]", "Discharge capacity [A.h]"]
+ ]
+ )
- return e, de
+ if self.n_outputs == 1:
+ r = r.reshape(self.problem.n_time_data)
+ dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
+ e = np.sum(r**2, axis=0)
+ de = 2 * np.sum((r * dy), axis=1)
+ return e.item(), de
+
+ else:
+ r = r.reshape(self.n_outputs, self.problem.n_time_data)
+ e = np.sum(r**2, axis=0)
+ de = 2 * np.sum((r[:, :, np.newaxis] * dy), axis=1)
+ return np.sum(e), np.sum(de, axis=1)
def set_fail_gradient(self, de):
"""
diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py
index 1f56f9b4..ab739bf7 100644
--- a/pybop/models/base_model.py
+++ b/pybop/models/base_model.py
@@ -58,6 +58,8 @@ def __init__(self, name="Base Model"):
self.parameters = None
self.dataset = None
self.signal = None
+ self.non_parameters = None
+ self.default_variables = []
self.matched_parameters = {}
self.non_matched_parameters = {}
self.fit_keys = []
@@ -353,11 +355,14 @@ def simulate(self, inputs, t_eval) -> np.ndarray[np.float64]:
self.built_model, inputs=inputs, t_eval=t_eval
)
else:
- return [np.inf]
+ return {signal: [np.inf] for signal in self.signal}
- predictions = [sol[signal].data for signal in self.signal]
+ predictions = {
+ signal: sol[signal].data
+ for signal in (self.signal + self.default_variables)
+ }
- return np.vstack(predictions).T
+ return predictions
def simulateS1(self, inputs, t_eval):
"""
@@ -399,20 +404,20 @@ def simulateS1(self, inputs, t_eval):
t_eval=t_eval,
calculate_sensitivities=True,
)
+ predictions = {signal: sol[signal].data for signal in self.signal}
- predictions = [sol[signal].data for signal in self.signal]
+ dy = np.asarray(
+ [
+ sol[signal].sensitivities[key].toarray()
+ for signal in self.signal
+ for key in self.fit_keys
+ ]
+ ).reshape(self.n_parameters, self.n_time_data, self.n_outputs)
- sensitivities = [
- np.array(
- [[sol[signal].sensitivities[key]] for signal in self.signal]
- ).reshape(len(sol[self.signal[0]].data), self.n_outputs)
- for key in self.fit_keys
- ]
-
- return np.vstack(predictions).T, np.dstack(sensitivities)
+ return predictions, dy
else:
- return [np.inf], [np.inf]
+ return {signal: [np.inf] for signal in self.signal}, [np.inf]
def predict(
self,
diff --git a/pybop/models/empirical/__init__.py b/pybop/models/empirical/__init__.py
index 58790627..6a28b0a9 100644
--- a/pybop/models/empirical/__init__.py
+++ b/pybop/models/empirical/__init__.py
@@ -1,4 +1,4 @@
#
# Import lithium ion based models
#
-from .ecm import Thevenin
+from .ecm import ECircuitModel, Thevenin
diff --git a/pybop/models/lithium_ion/__init__.py b/pybop/models/lithium_ion/__init__.py
index d61591b4..4dca05ea 100644
--- a/pybop/models/lithium_ion/__init__.py
+++ b/pybop/models/lithium_ion/__init__.py
@@ -1,4 +1,4 @@
#
# Import lithium ion based models
#
-from .echem import SPM, SPMe
+from .echem import EChemBaseModel, SPM, SPMe
diff --git a/pybop/models/lithium_ion/echem_base.py b/pybop/models/lithium_ion/echem_base.py
index 7e5c869f..6a642ff3 100644
--- a/pybop/models/lithium_ion/echem_base.py
+++ b/pybop/models/lithium_ion/echem_base.py
@@ -223,9 +223,14 @@ def approximate_capacity(self, x):
# Calculate average voltage
positive_electrode_ocp = self._parameter_set["Positive electrode OCP [V]"]
negative_electrode_ocp = self._parameter_set["Negative electrode OCP [V]"]
- average_voltage = positive_electrode_ocp(mean_sto_pos) - negative_electrode_ocp(
- mean_sto_neg
- )
+ try:
+ average_voltage = positive_electrode_ocp(
+ mean_sto_pos
+ ) - negative_electrode_ocp(mean_sto_neg)
+ except TypeError:
+ average_voltage = positive_electrode_ocp([mean_sto_pos]).evaluate()[0][
+ 0
+ ] - negative_electrode_ocp(mean_sto_neg) # Super hacky, needs to be fixed
# Calculate and update nominal capacity
theoretical_capacity = theoretical_energy / average_voltage
diff --git a/pybop/plotting/plot_problem.py b/pybop/plotting/plot_problem.py
index 57aebe04..67f63d75 100644
--- a/pybop/plotting/plot_problem.py
+++ b/pybop/plotting/plot_problem.py
@@ -32,37 +32,37 @@ def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs):
parameter_values = problem.x0
# Extract the time data and evaluate the model's output and target values
- reference_time_data = problem.time_data()
+ xaxis_data = problem.time_data()
model_output = problem.evaluate(parameter_values)
target_output = problem.target()
# Create a plot for each output
figure_list = []
- for i in range(0, problem.n_outputs):
+ for i in problem.signal:
default_layout_options = dict(
title="Scatter Plot",
xaxis_title="Time / s",
- yaxis_title=pybop.StandardPlot.remove_brackets(problem.signal[i]),
+ yaxis_title=pybop.StandardPlot.remove_brackets(i),
)
# Create a plotting dictionary
if isinstance(problem, pybop.DesignProblem):
trace_name = "Optimised"
- opt_time_data = model_output[:, -1]
+ opt_time_data = model_output["Time [s]"]
else:
trace_name = "Model"
- opt_time_data = reference_time_data
+ opt_time_data = xaxis_data
plot_dict = pybop.StandardPlot(
x=opt_time_data,
- y=model_output[:, i],
+ y=model_output[i],
layout_options=default_layout_options,
trace_names=trace_name,
)
target_trace = plot_dict.create_trace(
- x=reference_time_data,
- y=target_output[:, i],
+ x=xaxis_data,
+ y=target_output[i],
name="Reference",
mode="markers",
showlegend=True,
@@ -71,12 +71,12 @@ def quick_plot(problem, parameter_values=None, show=True, **layout_kwargs):
if isinstance(problem, pybop.FittingProblem):
# Compute the standard deviation as proxy for uncertainty
- plot_dict.sigma = np.std(model_output[:, i] - target_output[:, i])
+ plot_dict.sigma = np.std(model_output[i] - target_output[i])
# Convert x and upper and lower limits into lists to create a filled trace
- x = reference_time_data.tolist()
- y_upper = (model_output[:, i] + plot_dict.sigma).tolist()
- y_lower = (model_output[:, i] - plot_dict.sigma).tolist()
+ x = xaxis_data.tolist()
+ y_upper = (model_output[i] + plot_dict.sigma).tolist()
+ y_lower = (model_output[i] - plot_dict.sigma).tolist()
fill_trace = plot_dict.create_trace(
x=x + x[::-1],
diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index e9d7cd9a..10d1278c 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -81,6 +81,7 @@ def test_spm_optimisers(self, optimiser, spm_costs, x0):
if optimiser in [pybop.CMAES]:
parameterisation.set_f_guessed_tracking(True)
+ parameterisation.cost.problem._model.allow_infeasible_solutions = False
assert parameterisation._use_f_guessed is True
parameterisation.set_max_iterations(1)
x, final_cost = parameterisation.run()
@@ -126,12 +127,15 @@ def spm_two_signal_cost(self, parameters, model, x0):
{
"Time [s]": solution["Time [s]"].data,
"Current function [A]": solution["Current [A]"].data,
- "Terminal voltage [V]": solution["Terminal voltage [V]"].data,
+ "Voltage [V]": solution["Voltage [V]"].data,
+ "Bulk open-circuit voltage [V]": solution[
+ "Bulk open-circuit voltage [V]"
+ ].data,
}
)
# Define the cost to optimise
- signal = ["Terminal voltage [V]", "Time [s]"]
+ signal = ["Voltage [V]", "Bulk open-circuit voltage [V]"]
problem = pybop.FittingProblem(
model, parameters, dataset, signal=signal, init_soc=init_soc
)
diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py
index 29172932..7d3a4ea6 100644
--- a/tests/unit/test_cost.py
+++ b/tests/unit/test_cost.py
@@ -122,7 +122,7 @@ def test_costs(self, cost):
)
# Test type of returned value
- assert type(cost([0.5])) == np.float64
+ assert np.isscalar(cost([0.5]))
if isinstance(cost, pybop.ObserverCost):
with pytest.raises(NotImplementedError):
@@ -137,7 +137,7 @@ def test_costs(self, cost):
if isinstance(cost, pybop.SumSquaredError):
e, de = cost.evaluateS1([0.5])
- assert type(e) == np.float64
+ assert np.isscalar(e)
assert type(de) == np.ndarray
# Test option setting
@@ -187,7 +187,7 @@ def test_energy_density_costs(
cost = cost_class(problem)
# Test type of returned value
- assert type(cost([0.5])) == np.float64
+ assert np.isscalar(cost([0.5]))
assert cost([0.4]) <= 0 # Should be a viable design
assert cost([0.8]) == np.inf # Should exceed active material + porosity < 1
assert cost([1.4]) == np.inf # Definitely not viable
diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py
index ce9be93e..2e7f7ea5 100644
--- a/tests/unit/test_models.py
+++ b/tests/unit/test_models.py
@@ -202,9 +202,9 @@ def test_simulate(self):
model.signal = ["y_0"]
inputs = {}
t_eval = np.linspace(0, 10, 100)
- expected = y0 * np.exp(-k * t_eval).reshape(-1, 1)
+ expected = y0 * np.exp(-k * t_eval)
solved = model.simulate(inputs, t_eval)
- np.testing.assert_array_almost_equal(solved, expected, decimal=5)
+ np.testing.assert_array_almost_equal(solved["y_0"], expected, decimal=5)
with pytest.raises(ValueError):
ExponentialDecay(n_states=-1)
diff --git a/tests/unit/test_problem.py b/tests/unit/test_problem.py
index 0cdfd8bc..5b436a31 100644
--- a/tests/unit/test_problem.py
+++ b/tests/unit/test_problem.py
@@ -167,7 +167,7 @@ def test_problem_construct_with_model_predict(
assert problem._model._built_model is not None
with pytest.raises(AssertionError):
np.testing.assert_allclose(
- out["Terminal voltage [V]"].data,
- problem_output,
+ out["Voltage [V]"].data,
+ problem_output["Voltage [V]"],
atol=1e-5,
)
diff --git a/tests/unit/test_standalone.py b/tests/unit/test_standalone.py
index 4ba611a9..f5b0a33e 100644
--- a/tests/unit/test_standalone.py
+++ b/tests/unit/test_standalone.py
@@ -53,13 +53,18 @@ def test_standalone_problem(self):
# Test the Problem with a Cost
rmse_cost = pybop.RootMeanSquaredError(problem)
- x = rmse_cost([1, 2])
+ rmse_x = rmse_cost([1, 2])
+ rmse_grad_x = rmse_cost.evaluateS1([1, 2])
- np.testing.assert_allclose(x, 3.138, atol=1e-2)
+ np.testing.assert_allclose(rmse_x, 3.05615, atol=1e-2)
+ np.testing.assert_allclose(rmse_grad_x[1], [-0.81758337, 0.0], atol=1e-2)
# Test the sensitivities
sums_cost = pybop.SumSquaredError(problem)
- sums_cost.evaluateS1([1, 2])
+ x = sums_cost.evaluateS1([1, 2])
+
+ np.testing.assert_allclose(x[0], 934.006734006734, atol=1e-2)
+ np.testing.assert_allclose(x[1], [-334.006734, 0.0], atol=1e-2)
# Test incorrect number of initial parameter values
with pytest.raises(ValueError):
From 3428c97608d788c9342d6fd429f1eb4272e7888c Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 1 Mar 2024 14:13:02 +0000
Subject: [PATCH 31/64] Fix ukf examples, temporarily limits ukf to signal
output model
---
examples/scripts/exp_UKF.py | 5 ++-
pybop/observers/observer.py | 64 +++++++++++++++++------------
pybop/observers/unscented_kalman.py | 3 +-
3 files changed, 42 insertions(+), 30 deletions(-)
diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py
index 965775ec..77e0c645 100644
--- a/examples/scripts/exp_UKF.py
+++ b/examples/scripts/exp_UKF.py
@@ -42,7 +42,8 @@
simulator = pybop.Observer(parameters, model, signal=["2y"], x0=x0)
simulator._time_data = t_eval
measurements = simulator.evaluate(x0)
-measurements = measurements[:, 0]
+
+measurements = measurements["2y"]
# Verification step: Compare by plotting
go = pybop.PlotlyManager().go
@@ -85,7 +86,7 @@
# Verification step: Find the maximum likelihood estimate given the true parameters
estimation = observer.evaluate(x0)
-estimation = estimation[:, 0]
+estimation = estimation["2y"]
# Verification step: Add the estimate to the plot
line4 = go.Scatter(x=t_eval, y=estimation, name="Estimated trajectory", mode="lines")
diff --git a/pybop/observers/observer.py b/pybop/observers/observer.py
index 0a93fe8a..173d9f3c 100644
--- a/pybop/observers/observer.py
+++ b/pybop/observers/observer.py
@@ -53,6 +53,7 @@ def __init__(
self._state = model.reinit(inputs)
self._model = model
self._signal = self.signal
+ self._n_outputs = len(self._signal)
def reset(self, inputs: Inputs) -> None:
self._state = self._model.reinit(inputs)
@@ -78,9 +79,7 @@ def observe(self, time: float, value: Optional[np.ndarray] = None) -> float:
self._state = self._model.step(self._state, time)
return 0.0
- def log_likelihood(
- self, values: np.ndarray, times: np.ndarray, inputs: Inputs
- ) -> float:
+ def log_likelihood(self, values: dict, times: np.ndarray, inputs: Inputs) -> float:
"""
Returns the log likelihood of the model given the values and inputs.
@@ -93,16 +92,22 @@ def log_likelihood(
inputs : Inputs
The inputs to the model.
"""
- if len(values) != len(times):
- raise ValueError("values and times must have the same length.")
- log_likelihood = 0.0
- self.reset(inputs)
- for t, v in zip(times, values):
- try:
- log_likelihood += self.observe(t, v)
- except Exception:
- return np.float64(-np.inf)
- return log_likelihood
+ if self._n_outputs == 1:
+ signal = self._signal[0]
+ if len(values[signal]) != len(times):
+ raise ValueError("values and times must have the same length.")
+ log_likelihood = 0.0
+ self.reset(inputs)
+ for t, v in zip(times, values[signal]):
+ try:
+ log_likelihood += self.observe(t, v)
+ except Exception:
+ return np.float64(-np.inf)
+ return log_likelihood
+ else:
+ raise ValueError(
+                "Observer.log_likelihood is currently restricted to single output models."
+ )
def get_current_state(self) -> TimeSeriesState:
"""
@@ -156,17 +161,24 @@ def evaluate(self, x):
inputs[param.name] = x[i]
self.reset(inputs)
- output = []
- if hasattr(self, "_dataset"):
- ym = self._target
- for i, t in enumerate(self._time_data):
- self.observe(t, ym[i])
- ys = self.get_current_measure()
- output.append(ys)
+ if self._n_outputs == 1:
+ signal = self._signal[0]
+ output = []
+ if hasattr(self, "_dataset"):
+ ym = self._target[signal]
+ for i, t in enumerate(self._time_data):
+ self.observe(t, ym[i])
+ ys = self.get_current_measure()
+ output.append(ys)
+ else:
+ for t in self._time_data:
+ self.observe(t)
+ ys = self.get_current_measure()
+ output.append(ys)
+
+ out = {signal: np.vstack(output) for signal in self._signal}
+ return out
else:
- for t in self._time_data:
- self.observe(t)
- ys = self.get_current_measure()
- output.append(ys)
-
- return np.vstack(output)
+ raise ValueError(
+ "Observer is currently restricted to single output models."
+ )
diff --git a/pybop/observers/unscented_kalman.py b/pybop/observers/unscented_kalman.py
index 62b9d0a7..62e98d91 100644
--- a/pybop/observers/unscented_kalman.py
+++ b/pybop/observers/unscented_kalman.py
@@ -62,8 +62,7 @@ def __init__(
self._time_data = self._dataset["Time [s]"]
self.n_time_data = len(self._time_data)
- target = [self._dataset[signal] for signal in self.signal]
- self._target = np.vstack(target).T
+ self._target = {signal: self._dataset[signal] for signal in self.signal}
# Add useful parameters to model
if model is not None:
From 43521dae672d82ede892d4c21afa2eb4a53146a7 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Sat, 2 Mar 2024 19:05:46 +0000
Subject: [PATCH 32/64] default_variables to additional_variables w/
docstrings, updt. observer tests, dict output observer.evaluate()
---
examples/scripts/exp_UKF.py | 6 ++---
pybop/_problem.py | 38 ++++++++++++++++++++++-------
pybop/models/base_model.py | 5 ++--
pybop/observers/observer.py | 33 +++++++++++++------------
pybop/observers/unscented_kalman.py | 5 +++-
tests/unit/test_observers.py | 4 +--
6 files changed, 57 insertions(+), 34 deletions(-)
diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py
index 77e0c645..bbbc6f95 100644
--- a/examples/scripts/exp_UKF.py
+++ b/examples/scripts/exp_UKF.py
@@ -43,15 +43,15 @@
simulator._time_data = t_eval
measurements = simulator.evaluate(x0)
-measurements = measurements["2y"]
-
# Verification step: Compare by plotting
go = pybop.PlotlyManager().go
line1 = go.Scatter(x=t_eval, y=corrupt_values, name="Corrupt values", mode="markers")
line2 = go.Scatter(
x=t_eval, y=expected_values, name="Expected trajectory", mode="lines"
)
-line3 = go.Scatter(x=t_eval, y=measurements, name="Observed values", mode="markers")
+line3 = go.Scatter(
+ x=t_eval, y=measurements["2y"], name="Observed values", mode="markers"
+)
fig = go.Figure(data=[line1, line2, line3])
# Form dataset
diff --git a/pybop/_problem.py b/pybop/_problem.py
index e2aaee3d..f1f0e8c8 100644
--- a/pybop/_problem.py
+++ b/pybop/_problem.py
@@ -16,6 +16,8 @@ class BaseProblem:
Flag to indicate if the model should be checked (default: True).
signal: List[str]
The signal to observe.
+ additional_variables : List[str], optional
+ Additional variables to observe and store in the solution (default: []).
init_soc : float, optional
Initial state of charge (default: None).
x0 : np.ndarray, optional
@@ -28,7 +30,7 @@ def __init__(
model=None,
check_model=True,
signal=["Voltage [V]"],
- default_variables=[],
+ additional_variables=[],
init_soc=None,
x0=None,
):
@@ -48,9 +50,9 @@ def __init__(
self._target = None
if isinstance(model, (pybop.BaseModel, pybop.lithium_ion.EChemBaseModel)):
- self.default_variables = default_variables
+ self.additional_variables = additional_variables
else:
- self.default_variables = []
+ self.additional_variables = []
# Set bounds
self.bounds = dict(
@@ -146,6 +148,12 @@ class FittingProblem(BaseProblem):
Dataset object containing the data to fit the model to.
signal : str, optional
The signal to fit (default: "Voltage [V]").
+ additional_variables : List[str], optional
+ Additional variables to observe and store in the solution (default: []).
+ init_soc : float, optional
+ Initial state of charge (default: None).
+ x0 : np.ndarray, optional
+ Initial parameter values (default: None).
"""
def __init__(
@@ -155,12 +163,13 @@ def __init__(
dataset,
check_model=True,
signal=["Voltage [V]"],
- default_variables=["Time [s]", "Discharge capacity [A.h]"],
+ additional_variables=[],
init_soc=None,
x0=None,
):
+ additional_variables += ["Time [s]", "Discharge capacity [A.h]"]
super().__init__(
- parameters, model, check_model, signal, default_variables, init_soc, x0
+ parameters, model, check_model, signal, additional_variables, init_soc, x0
)
self._dataset = dataset.data
self.x = self.x0
@@ -176,7 +185,7 @@ def __init__(
# Add useful parameters to model
if model is not None:
self._model.signal = self.signal
- self._model.default_variables = self.default_variables
+ self._model.additional_variables = self.additional_variables
self._model.n_parameters = self.n_parameters
self._model.n_outputs = self.n_outputs
self._model.n_time_data = self.n_time_data
@@ -257,6 +266,16 @@ class DesignProblem(BaseProblem):
List of parameters for the problem.
experiment : object
The experimental setup to apply the model to.
+ check_model : bool, optional
+ Flag to indicate if the model parameters should be checked for feasibility each iteration (default: True).
+ signal : str, optional
+ The signal to fit (default: "Voltage [V]").
+ additional_variables : List[str], optional
+ Additional variables to observe and store in the solution (default: []).
+ init_soc : float, optional
+ Initial state of charge (default: None).
+ x0 : np.ndarray, optional
+ Initial parameter values (default: None).
"""
def __init__(
@@ -266,12 +285,13 @@ def __init__(
experiment,
check_model=True,
signal=["Voltage [V]"],
- default_variables=["Time [s]", "Current [A]", "Discharge capacity [A.h]"],
+ additional_variables=[],
init_soc=None,
x0=None,
):
+ additional_variables += ["Time [s]", "Current [A]", "Discharge capacity [A.h]"]
super().__init__(
- parameters, model, check_model, signal, default_variables, init_soc, x0
+ parameters, model, check_model, signal, additional_variables, init_soc, x0
)
self.experiment = experiment
@@ -323,7 +343,7 @@ def evaluate(self, x):
else:
predictions = {}
- for signal in self.signal + self.default_variables:
+ for signal in self.signal + self.additional_variables:
predictions[signal] = sol[signal].data
return predictions
diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py
index ab739bf7..8e291676 100644
--- a/pybop/models/base_model.py
+++ b/pybop/models/base_model.py
@@ -58,8 +58,7 @@ def __init__(self, name="Base Model"):
self.parameters = None
self.dataset = None
self.signal = None
- self.non_parameters = None
- self.default_variables = []
+ self.additional_variables = []
self.matched_parameters = {}
self.non_matched_parameters = {}
self.fit_keys = []
@@ -359,7 +358,7 @@ def simulate(self, inputs, t_eval) -> np.ndarray[np.float64]:
predictions = {
signal: sol[signal].data
- for signal in (self.signal + self.default_variables)
+ for signal in (self.signal + self.additional_variables)
}
return predictions
diff --git a/pybop/observers/observer.py b/pybop/observers/observer.py
index 173d9f3c..ce482ec8 100644
--- a/pybop/observers/observer.py
+++ b/pybop/observers/observer.py
@@ -22,6 +22,8 @@ class Observer(BaseProblem):
Flag to indicate if the model should be checked (default: True).
signal: List[str]
The signal to observe.
+ additional_variables : List[str], optional
+ Additional variables to observe and store in the solution (default: []).
init_soc : float, optional
Initial state of charge (default: None).
x0 : np.ndarray, optional
@@ -37,10 +39,13 @@ def __init__(
model: BaseModel,
check_model=True,
signal=["Voltage [V]"],
+ additional_variables=[],
init_soc=None,
x0=None,
) -> None:
- super().__init__(parameters, model, check_model, signal, init_soc, x0)
+ super().__init__(
+ parameters, model, check_model, signal, additional_variables, init_soc, x0
+ )
if model._built_model is None:
raise ValueError("Only built models can be used in Observers")
if model.signal is None:
@@ -161,24 +166,20 @@ def evaluate(self, x):
inputs[param.name] = x[i]
self.reset(inputs)
- if self._n_outputs == 1:
- signal = self._signal[0]
- output = []
- if hasattr(self, "_dataset"):
+ output = {}
+ ys = []
+ if hasattr(self, "_dataset"):
+ for signal in self._signal:
ym = self._target[signal]
for i, t in enumerate(self._time_data):
self.observe(t, ym[i])
- ys = self.get_current_measure()
- output.append(ys)
- else:
+ ys.append(self.get_current_measure())
+ output[signal] = np.vstack(ys)
+ else:
+ for signal in self._signal:
for t in self._time_data:
self.observe(t)
- ys = self.get_current_measure()
- output.append(ys)
+ ys.append(self.get_current_measure())
+ output[signal] = np.vstack(ys)
- out = {signal: np.vstack(output) for signal in self._signal}
- return out
- else:
- raise ValueError(
- "Observer is currently restricted to single output models."
- )
+ return output
diff --git a/pybop/observers/unscented_kalman.py b/pybop/observers/unscented_kalman.py
index 62e98d91..e0dd7b8f 100644
--- a/pybop/observers/unscented_kalman.py
+++ b/pybop/observers/unscented_kalman.py
@@ -50,10 +50,13 @@ def __init__(
dataset=None,
check_model=True,
signal=["Voltage [V]"],
+ additional_variables=[],
init_soc=None,
x0=None,
) -> None:
- super().__init__(parameters, model, check_model, signal, init_soc, x0)
+ super().__init__(
+ parameters, model, check_model, signal, additional_variables, init_soc, x0
+ )
if dataset is not None:
self._dataset = dataset.data
diff --git a/tests/unit/test_observers.py b/tests/unit/test_observers.py
index ab77428c..e2c44d74 100644
--- a/tests/unit/test_observers.py
+++ b/tests/unit/test_observers.py
@@ -62,7 +62,7 @@ def test_observer(self, model, parameters, x0):
observer.observe(-1)
with pytest.raises(ValueError):
observer.log_likelihood(
- t_eval, np.array([1]), inputs=observer._state.inputs
+ {"2y": t_eval}, np.array([1]), inputs=observer._state.inputs
)
# Test covariance
@@ -81,7 +81,7 @@ def test_observer(self, model, parameters, x0):
"Output": expected,
}
)
- observer._target = expected
+ observer._target = {"2y": expected}
observer.evaluate(x0)
@pytest.mark.unit
From 67d28876c50e303eb359707f80023f38fbbc0a74 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 4 Mar 2024 13:50:13 +0000
Subject: [PATCH 33/64] Fix integration test logic, add gradient landscape
plots, pin pytest version due to breaking change in 8.1.0
---
examples/scripts/exp_UKF.py | 5 +-
examples/scripts/spm_adam.py | 39 ++++++--
noxfile.py | 2 +-
pybop/costs/fitting_costs.py | 13 +--
pybop/plotting/plot_cost2d.py | 47 ++++++++-
pyproject.toml | 2 +-
tests/integration/test_parameterisations.py | 100 +++++++++++---------
7 files changed, 141 insertions(+), 67 deletions(-)
diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py
index bbbc6f95..d7d838cc 100644
--- a/examples/scripts/exp_UKF.py
+++ b/examples/scripts/exp_UKF.py
@@ -86,10 +86,11 @@
# Verification step: Find the maximum likelihood estimate given the true parameters
estimation = observer.evaluate(x0)
-estimation = estimation["2y"]
# Verification step: Add the estimate to the plot
-line4 = go.Scatter(x=t_eval, y=estimation, name="Estimated trajectory", mode="lines")
+line4 = go.Scatter(
+ x=t_eval, y=estimation["2y"], name="Estimated trajectory", mode="lines"
+)
fig.add_trace(line4)
fig.show()
diff --git a/examples/scripts/spm_adam.py b/examples/scripts/spm_adam.py
index 56d315f4..78d484b4 100644
--- a/examples/scripts/spm_adam.py
+++ b/examples/scripts/spm_adam.py
@@ -20,34 +20,50 @@
]
# Generate data
-sigma = 0.01
-t_eval = np.arange(0, 900, 2)
-values = model.predict(t_eval=t_eval)
-corrupt_values = values["Voltage [V]"].data + np.random.normal(0, sigma, len(t_eval))
+init_soc = 0.5
+sigma = 0.003
+experiment = pybop.Experiment(
+ [
+ (
+ "Discharge at 0.5C for 3 minutes (1 second period)",
+ "Charge at 0.5C for 3 minutes (1 second period)",
+ ),
+ ]
+ * 2
+)
+values = model.predict(init_soc=init_soc, experiment=experiment)
+
+
+def noise(sigma):
+ return np.random.normal(0, sigma, len(values["Voltage [V]"].data))
+
# Form dataset
dataset = pybop.Dataset(
{
- "Time [s]": t_eval,
+ "Time [s]": values["Time [s]"].data,
"Current function [A]": values["Current [A]"].data,
- "Voltage [V]": corrupt_values,
- "Bulk open-circuit voltage [V]": values["Bulk open-circuit voltage [V]"].data,
+ "Voltage [V]": values["Voltage [V]"].data + noise(sigma),
+ "Bulk open-circuit voltage [V]": values["Bulk open-circuit voltage [V]"].data
+ + noise(sigma),
}
)
signal = ["Voltage [V]", "Bulk open-circuit voltage [V]"]
# Generate problem, cost function, and optimisation class
-problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
+problem = pybop.FittingProblem(
+ model, parameters, dataset, signal=signal, init_soc=init_soc
+)
cost = pybop.RootMeanSquaredError(problem)
optim = pybop.Optimisation(
cost,
optimiser=pybop.Adam,
verbose=True,
allow_infeasible_solutions=True,
- sigma0=sigma,
+ sigma0=0.05,
)
optim.set_max_iterations(100)
-optim.set_max_unchanged_iterations(20)
+optim.set_max_unchanged_iterations(45)
# Run optimisation
x, final_cost = optim.run()
@@ -64,3 +80,6 @@
# Plot the cost landscape with optimisation path
pybop.plot_optim2d(optim, steps=15)
+
+# Plot the cost and gradient landscapes
+pybop.plot_cost2d(cost, gradient=True, steps=3)
diff --git a/noxfile.py b/noxfile.py
index c732bc3f..5784e90d 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -33,7 +33,7 @@ def coverage(session):
"--cov-report=xml",
)
session.run(
- "pytest", "--plots", "--cov", "--cov-append", "--cov-report=xml", "-n", "1"
+ "pytest", "--plots", "--cov", "--cov-append", "--cov-report=xml", "-n", "0"
)
diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py
index d8bb212a..eb21001a 100644
--- a/pybop/costs/fitting_costs.py
+++ b/pybop/costs/fitting_costs.py
@@ -98,16 +98,17 @@ def _evaluateS1(self, x):
r = r.reshape(self.problem.n_time_data)
dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
e = np.sqrt(np.mean(r**2))
- de = np.mean((r * dy), axis=1) / np.sqrt(
- np.mean((r * dy) ** 2, axis=1) + np.finfo(float).eps
+ de = np.mean((r * dy), axis=1) / (
+ np.sqrt(np.mean((r * dy) ** 2, axis=1) + np.finfo(float).eps)
)
return e.item(), de.flatten()
else:
r = r.reshape(self.n_outputs, self.problem.n_time_data)
e = np.sqrt(np.mean(r**2, axis=1))
- de = np.mean((r[:, :, np.newaxis] * dy), axis=1) / np.sqrt(
- np.mean((r[:, :, np.newaxis] * dy) ** 2, axis=1) + np.finfo(float).eps
+ de = np.mean((r[:, :, np.newaxis] * dy), axis=1) / (
+ np.sqrt(np.mean((r[:, :, np.newaxis] * dy) ** 2, axis=1))
+ + np.finfo(float).eps
)
return np.sum(e), np.sum(de, axis=1)
@@ -162,7 +163,7 @@ def _evaluate(self, x, grad=None):
e = np.array(
[
- np.sum(((prediction[signal] - self._target[signal]) ** 2), axis=0)
+ np.sum(((prediction[signal] - self._target[signal]) ** 2))
for signal in prediction
if signal not in ["Time [s]", "Discharge capacity [A.h]"]
]
@@ -217,7 +218,7 @@ def _evaluateS1(self, x):
else:
r = r.reshape(self.n_outputs, self.problem.n_time_data)
- e = np.sum(r**2, axis=0)
+ e = np.sum(r**2, axis=1)
de = 2 * np.sum((r[:, :, np.newaxis] * dy), axis=1)
return np.sum(e), np.sum(de, axis=1)
diff --git a/pybop/plotting/plot_cost2d.py b/pybop/plotting/plot_cost2d.py
index 6e429117..30c2ef66 100644
--- a/pybop/plotting/plot_cost2d.py
+++ b/pybop/plotting/plot_cost2d.py
@@ -3,7 +3,9 @@
import numpy as np
-def plot_cost2d(cost, bounds=None, steps=10, show=True, **layout_kwargs):
+def plot_cost2d(
+ cost, gradient=False, bounds=None, steps=10, show=True, **layout_kwargs
+):
"""
Plot a 2D visualisation of a cost landscape using Plotly.
@@ -54,6 +56,23 @@ def plot_cost2d(cost, bounds=None, steps=10, show=True, **layout_kwargs):
for j, yj in enumerate(y):
costs[j, i] = cost(np.array([xi, yj]))
+ if gradient:
+ grad_parameter_costs = []
+
+ # Determine the number of gradient outputs from cost.evaluateS1
+ num_gradients = len(cost.evaluateS1(np.array([x[0], y[0]]))[1])
+
+ # Create an array to hold each gradient output & populate
+ grads = [np.zeros((len(y), len(x))) for _ in range(num_gradients)]
+ for i, xi in enumerate(x):
+ for j, yj in enumerate(y):
+ (*current_grads,) = cost.evaluateS1(np.array([xi, yj]))[1]
+ for k, grad_output in enumerate(current_grads):
+ grads[k][j, i] = grad_output
+
+ # Append the arrays to the grad_parameter_costs list
+ grad_parameter_costs.extend(grads)
+
# Import plotly only when needed
go = pybop.PlotlyManager().go
@@ -80,6 +99,32 @@ def plot_cost2d(cost, bounds=None, steps=10, show=True, **layout_kwargs):
elif show:
fig.show()
+ if gradient:
+ grad_figs = []
+ for i, grad_costs in enumerate(grad_parameter_costs):
+ # Update title for gradient plots
+ updated_layout_options = layout_options.copy()
+ updated_layout_options["title"] = f"Gradient for Parameter: {i+1}"
+
+ # Create contour plot with updated layout options
+ grad_layout = go.Layout(updated_layout_options)
+
+ # Create fig
+ grad_fig = go.Figure(
+ data=[go.Contour(x=x, y=y, z=grad_costs)], layout=grad_layout
+ )
+ grad_fig.update_layout(**layout_kwargs)
+
+ if "ipykernel" in sys.modules and show:
+ grad_fig.show("svg")
+ elif show:
+ grad_fig.show()
+
+ # append grad_fig to list
+ grad_figs.append(grad_fig)
+
+ return fig, grad_figs
+
return fig
diff --git a/pyproject.toml b/pyproject.toml
index 36eb03a4..3287989f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -41,7 +41,7 @@ dev = [
"nox",
"nbmake",
"pre-commit",
- "pytest>=6",
+ "pytest<=8",
"pytest-cov",
"pytest-mock",
"pytest-xdist",
diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index 10d1278c..33ad4f36 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -8,6 +8,12 @@ class TestModelParameterisation:
A class to test the model parameterisation methods.
"""
+ @pytest.fixture(autouse=True)
+ def setup(self):
+ self.ground_truth = np.array([0.55, 0.55]) + np.random.normal(
+ loc=0.0, scale=0.05, size=2
+ )
+
@pytest.fixture
def model(self):
parameter_set = pybop.ParameterSet.pybamm("Chen2020")
@@ -18,21 +24,17 @@ def parameters(self):
return [
pybop.Parameter(
"Negative electrode active material volume fraction",
- prior=pybop.Gaussian(0.6, 0.02),
- bounds=[0.375, 0.7],
+ prior=pybop.Gaussian(0.55, 0.05),
+ bounds=[0.375, 0.75],
),
pybop.Parameter(
"Positive electrode active material volume fraction",
- prior=pybop.Gaussian(0.5, 0.02),
- bounds=[0.375, 0.625],
+ prior=pybop.Gaussian(0.55, 0.05),
+ bounds=[0.375, 0.75],
),
]
- @pytest.fixture
- def x0(self):
- return np.array([0.63, 0.51])
-
- @pytest.fixture(params=[0.3, 0.7])
+ @pytest.fixture(params=[0.4, 0.7])
def init_soc(self, request):
return request.param
@@ -40,20 +42,24 @@ def init_soc(self, request):
def cost_class(self, request):
return request.param
+ def noise(self, sigma, values):
+ return np.random.normal(0, sigma, values)
+
@pytest.fixture
- def spm_costs(self, parameters, model, x0, cost_class, init_soc):
+ def spm_costs(self, model, parameters, cost_class, init_soc):
# Form dataset
- solution = self.getdata(model, x0, init_soc)
+ solution = self.getdata(model, self.ground_truth, init_soc)
dataset = pybop.Dataset(
{
"Time [s]": solution["Time [s]"].data,
"Current function [A]": solution["Current [A]"].data,
- "Terminal voltage [V]": solution["Terminal voltage [V]"].data,
+ "Voltage [V]": solution["Voltage [V]"].data
+ + self.noise(0.002, len(solution["Time [s]"].data)),
}
)
# Define the cost to optimise
- signal = ["Terminal voltage [V]"]
+ signal = ["Voltage [V]"]
problem = pybop.FittingProblem(
model, parameters, dataset, signal=signal, init_soc=init_soc
)
@@ -74,10 +80,14 @@ def spm_costs(self, parameters, model, x0, cost_class, init_soc):
],
)
@pytest.mark.integration
- def test_spm_optimisers(self, optimiser, spm_costs, x0):
+ def test_spm_optimisers(self, optimiser, spm_costs):
# Test each optimiser
- parameterisation = pybop.Optimisation(cost=spm_costs, optimiser=optimiser)
+ initial_cost = spm_costs(spm_costs.x0)
+ parameterisation = pybop.Optimisation(
+ cost=spm_costs, optimiser=optimiser, sigma0=0.05
+ )
parameterisation.set_max_unchanged_iterations(iterations=25, threshold=5e-4)
+ parameterisation.set_max_iterations(125)
if optimiser in [pybop.CMAES]:
parameterisation.set_f_guessed_tracking(True)
@@ -102,35 +112,34 @@ def test_spm_optimisers(self, optimiser, spm_costs, x0):
parameterisation.optimiser.set_population_size(-5)
parameterisation.optimiser.set_population_size(5)
- parameterisation.set_max_iterations(125)
x, final_cost = parameterisation.run()
elif optimiser in [pybop.SciPyMinimize]:
parameterisation.cost.problem._model.allow_infeasible_solutions = False
- parameterisation.set_max_iterations(125)
x, final_cost = parameterisation.run()
else:
- parameterisation.set_max_iterations(125)
x, final_cost = parameterisation.run()
# Assertions
- np.testing.assert_allclose(final_cost, 0, atol=1e-2)
- np.testing.assert_allclose(x, x0, atol=5e-2)
+ assert initial_cost > final_cost
+ np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
@pytest.fixture
- def spm_two_signal_cost(self, parameters, model, x0):
+ def spm_two_signal_cost(self, parameters, model, cost_class):
# Form dataset
init_soc = 0.5
- solution = self.getdata(model, x0, init_soc)
+ solution = self.getdata(model, self.ground_truth, init_soc)
dataset = pybop.Dataset(
{
"Time [s]": solution["Time [s]"].data,
"Current function [A]": solution["Current [A]"].data,
- "Voltage [V]": solution["Voltage [V]"].data,
+ "Voltage [V]": solution["Voltage [V]"].data
+ + self.noise(0.002, len(solution["Time [s]"].data)),
"Bulk open-circuit voltage [V]": solution[
"Bulk open-circuit voltage [V]"
- ].data,
+ ].data
+ + self.noise(0.002, len(solution["Time [s]"].data)),
}
)
@@ -139,54 +148,55 @@ def spm_two_signal_cost(self, parameters, model, x0):
problem = pybop.FittingProblem(
model, parameters, dataset, signal=signal, init_soc=init_soc
)
- return pybop.SumSquaredError(problem)
+ return cost_class(problem)
@pytest.mark.parametrize(
- "optimiser",
+ "multi_optimiser",
[
pybop.SciPyDifferentialEvolution,
- pybop.IRPropMin,
+ pybop.Adam,
pybop.CMAES,
],
)
@pytest.mark.integration
- def test_multiple_signals(self, optimiser, spm_two_signal_cost, x0):
+ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost):
# Test each optimiser
+ initial_cost = spm_two_signal_cost(spm_two_signal_cost.x0)
parameterisation = pybop.Optimisation(
- cost=spm_two_signal_cost, optimiser=optimiser
+ cost=spm_two_signal_cost, optimiser=multi_optimiser, sigma0=0.05
)
parameterisation.set_max_unchanged_iterations(iterations=15, threshold=5e-4)
- parameterisation.set_max_iterations(100)
+ parameterisation.set_max_iterations(125)
- if optimiser in [pybop.SciPyDifferentialEvolution]:
+ if multi_optimiser in [pybop.SciPyDifferentialEvolution]:
parameterisation.optimiser.set_population_size(5)
x, final_cost = parameterisation.run()
# Assertions
- np.testing.assert_allclose(final_cost, 0, atol=1e-2)
- np.testing.assert_allclose(x, x0, atol=5e-2)
+ assert initial_cost > final_cost
+ np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
- @pytest.mark.parametrize("init_soc", [0.3, 0.7])
+ @pytest.mark.parametrize("init_soc", [0.4, 0.7])
@pytest.mark.integration
- def test_model_misparameterisation(self, parameters, model, x0, init_soc):
+ def test_model_misparameterisation(self, parameters, model, init_soc):
# Define two different models with different parameter sets
# The optimisation should fail as the models are not the same
second_parameter_set = pybop.ParameterSet.pybamm("Ecker2015")
second_model = pybop.lithium_ion.SPM(parameter_set=second_parameter_set)
# Form dataset
- solution = self.getdata(second_model, x0, init_soc)
+ solution = self.getdata(second_model, self.ground_truth, init_soc)
dataset = pybop.Dataset(
{
"Time [s]": solution["Time [s]"].data,
"Current function [A]": solution["Current [A]"].data,
- "Terminal voltage [V]": solution["Terminal voltage [V]"].data,
+ "Voltage [V]": solution["Voltage [V]"].data,
}
)
# Define the cost to optimise
- signal = ["Terminal voltage [V]"]
+ signal = ["Voltage [V]"]
problem = pybop.FittingProblem(
model, parameters, dataset, signal=signal, init_soc=init_soc
)
@@ -204,22 +214,20 @@ def test_model_misparameterisation(self, parameters, model, x0, init_soc):
# Assertions
with np.testing.assert_raises(AssertionError):
np.testing.assert_allclose(final_cost, 0, atol=1e-2)
- np.testing.assert_allclose(x, x0, atol=5e-2)
+ np.testing.assert_allclose(x, self.ground_truth, atol=2e-2)
- def getdata(self, model, x0, init_soc):
+ def getdata(self, model, x, init_soc):
model.parameter_set.update(
{
- "Negative electrode active material volume fraction": x0[0],
- "Positive electrode active material volume fraction": x0[1],
+ "Negative electrode active material volume fraction": x[0],
+ "Positive electrode active material volume fraction": x[1],
}
)
experiment = pybop.Experiment(
[
(
- "Discharge at 1C for 3 minutes (1 second period)",
- "Rest for 2 minutes (1 second period)",
- "Charge at 1C for 3 minutes (1 second period)",
- "Rest for 2 minutes (1 second period)",
+ "Discharge at 0.5C for 3 minutes (1 second period)",
+ "Charge at 0.5C for 3 minutes (1 second period)",
),
]
* 2
From b6a073ba39a29c2147e380eb60b4391d8fe0d58f Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 4 Mar 2024 14:23:45 +0000
Subject: [PATCH 34/64] Add tests for gradient plots, up coverage
---
tests/unit/test_plots.py | 28 +++++++++++++++++++++-------
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index fff8074b..bc721f02 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -13,18 +13,32 @@ def model(self):
# Define an example model
return pybop.lithium_ion.SPM()
+ # @pytest.fixture
+ # def parameters(self):
+ # return [
+ # pybop.Parameter(
+ # "Negative particle radius [m]",
+ # prior=pybop.Gaussian(6e-06, 0.1e-6),
+ # bounds=[1e-6, 9e-6],
+ # ),
+ # pybop.Parameter(
+ # "Positive particle radius [m]",
+ # prior=pybop.Gaussian(4.5e-06, 0.1e-6),
+ # bounds=[1e-6, 9e-6],
+ # ),
+ # ]
@pytest.fixture
def parameters(self):
return [
pybop.Parameter(
- "Negative particle radius [m]",
- prior=pybop.Gaussian(6e-06, 0.1e-6),
- bounds=[1e-6, 9e-6],
+ "Negative electrode active material volume fraction",
+ prior=pybop.Gaussian(0.68, 0.05),
+ bounds=[0.5, 0.8],
),
pybop.Parameter(
- "Positive particle radius [m]",
- prior=pybop.Gaussian(4.5e-06, 0.1e-6),
- bounds=[1e-6, 9e-6],
+ "Positive electrode active material volume fraction",
+ prior=pybop.Gaussian(0.58, 0.05),
+ bounds=[0.4, 0.7],
),
]
@@ -65,7 +79,7 @@ def cost(self, problem):
@pytest.mark.unit
def test_cost_plots(self, cost):
# Test plotting of Cost objects
- pybop.plot_cost2d(cost, steps=5)
+ pybop.plot_cost2d(cost, gradient=True, steps=5)
@pytest.fixture
def optim(self, cost):
From ee4cdffce74bb053821d0a76d26ce570f0f748dd Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Mon, 4 Mar 2024 16:30:12 +0000
Subject: [PATCH 35/64] Set default SciPyMinimize method to Nelder-Mead,
clean-up repo
---
examples/scripts/spm_scipymin.py | 4 ++--
pybop/optimisers/scipy_optimisers.py | 5 +++--
pyproject.toml | 2 +-
tests/unit/test_plots.py | 14 --------------
4 files changed, 6 insertions(+), 19 deletions(-)
diff --git a/examples/scripts/spm_scipymin.py b/examples/scripts/spm_scipymin.py
index db1d783d..759f8c2e 100644
--- a/examples/scripts/spm_scipymin.py
+++ b/examples/scripts/spm_scipymin.py
@@ -21,12 +21,12 @@
parameters = [
pybop.Parameter(
"Negative electrode active material volume fraction",
- prior=pybop.Gaussian(0.6, 0.02),
+ prior=pybop.Gaussian(0.6, 0.05),
bounds=[0.5, 0.8],
),
pybop.Parameter(
"Positive electrode active material volume fraction",
- prior=pybop.Gaussian(0.48, 0.02),
+ prior=pybop.Gaussian(0.48, 0.05),
bounds=[0.4, 0.7],
),
]
diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py
index 7e717b81..f4ea49a7 100644
--- a/pybop/optimisers/scipy_optimisers.py
+++ b/pybop/optimisers/scipy_optimisers.py
@@ -12,7 +12,8 @@ class SciPyMinimize(BaseOptimiser):
Parameters
----------
method : str, optional
- The type of solver to use. If not specified, defaults to 'COBYLA'.
+ The type of solver to use. If not specified, defaults to 'Nelder-Mead'.
+ Options: 'Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov'.
bounds : sequence or ``Bounds``, optional
Bounds for variables as supported by the selected method.
maxiter : int, optional
@@ -27,7 +28,7 @@ def __init__(self, method=None, bounds=None, maxiter=None):
self._max_iterations = maxiter
if self.method is None:
- self.method = "COBYLA" # "L-BFGS-B"
+ self.method = "Nelder-Mead"
def _runoptimise(self, cost_function, x0, bounds):
"""
diff --git a/pyproject.toml b/pyproject.toml
index 3287989f..36eb03a4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -41,7 +41,7 @@ dev = [
"nox",
"nbmake",
"pre-commit",
- "pytest<=8",
+ "pytest>=6",
"pytest-cov",
"pytest-mock",
"pytest-xdist",
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index bc721f02..d2f8387a 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -13,20 +13,6 @@ def model(self):
# Define an example model
return pybop.lithium_ion.SPM()
- # @pytest.fixture
- # def parameters(self):
- # return [
- # pybop.Parameter(
- # "Negative particle radius [m]",
- # prior=pybop.Gaussian(6e-06, 0.1e-6),
- # bounds=[1e-6, 9e-6],
- # ),
- # pybop.Parameter(
- # "Positive particle radius [m]",
- # prior=pybop.Gaussian(4.5e-06, 0.1e-6),
- # bounds=[1e-6, 9e-6],
- # ),
- # ]
@pytest.fixture
def parameters(self):
return [
From 66efaba7832774844a5449024e845d7daeb95b11 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 8 Mar 2024 09:55:58 +0000
Subject: [PATCH 36/64] unicode fix for win notebooks, update prediction shape
checks, remove Discharge capacity as default additional_variable
---
conftest.py | 5 +++
pybop/_problem.py | 11 +++----
pybop/costs/base_cost.py | 1 +
pybop/costs/design_costs.py | 1 -
pybop/costs/fitting_costs.py | 60 +++++++++++++-----------------------
5 files changed, 32 insertions(+), 46 deletions(-)
diff --git a/conftest.py b/conftest.py
index 3641cbd1..d8e79fcd 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1,6 +1,7 @@
import pytest
import matplotlib
import plotly
+import sys
plotly.io.renderers.default = None
matplotlib.use("Template")
@@ -42,6 +43,10 @@ def pytest_configure(config):
config.addinivalue_line("markers", "plots: mark test as a plot test")
config.addinivalue_line("markers", "notebook: mark test as a notebook test")
+ if sys.platform.startswith("win"):
+ # Set the output encoding to UTF-8 on Windows
+ sys.stdout = open(sys.stdout.fileno(), mode="w", encoding="utf8", buffering=1)
+
def pytest_collection_modifyitems(config, items):
options = {
diff --git a/pybop/_problem.py b/pybop/_problem.py
index f1f0e8c8..d1e6d394 100644
--- a/pybop/_problem.py
+++ b/pybop/_problem.py
@@ -147,9 +147,9 @@ class FittingProblem(BaseProblem):
dataset : Dataset
Dataset object containing the data to fit the model to.
signal : str, optional
- The signal to fit (default: "Voltage [V]").
+ The variable used for fitting (default: "Voltage [V]").
additional_variables : List[str], optional
- Additional variables to observe and store in the solution (default: []).
+ Additional variables to observe and store in the solution (default additions are: ["Time [s]"]).
init_soc : float, optional
Initial state of charge (default: None).
x0 : np.ndarray, optional
@@ -167,7 +167,7 @@ def __init__(
init_soc=None,
x0=None,
):
- additional_variables += ["Time [s]", "Discharge capacity [A.h]"]
+ additional_variables += ["Time [s]"]
super().__init__(
parameters, model, check_model, signal, additional_variables, init_soc, x0
)
@@ -271,7 +271,7 @@ class DesignProblem(BaseProblem):
signal : str, optional
The signal to fit (default: "Voltage [V]").
additional_variables : List[str], optional
- Additional variables to observe and store in the solution (default: []).
+ Additional variables to observe and store in the solution (default additions are: ["Time [s]", "Current [A]"]).
init_soc : float, optional
Initial state of charge (default: None).
x0 : np.ndarray, optional
@@ -289,7 +289,7 @@ def __init__(
init_soc=None,
x0=None,
):
- additional_variables += ["Time [s]", "Current [A]", "Discharge capacity [A.h]"]
+ additional_variables += ["Time [s]", "Current [A]"]
super().__init__(
parameters, model, check_model, signal, additional_variables, init_soc, x0
)
@@ -313,7 +313,6 @@ def __init__(
# Add an example dataset for plotting comparison
sol = self.evaluate(self.x0)
self._time_data = sol["Time [s]"]
- self._capacity_data = sol["Discharge capacity [A.h]"]
self._target = {key: sol[key] for key in self.signal}
self._dataset = None
diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index bdafec3c..6cbd2a19 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -33,6 +33,7 @@ def __init__(self, problem):
self.bounds = problem.bounds
self.n_parameters = problem.n_parameters
self.n_outputs = problem.n_outputs
+ self.signal = problem.signal
def __call__(self, x, grad=None):
"""
diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py
index f2b452af..e1fb38cb 100644
--- a/pybop/costs/design_costs.py
+++ b/pybop/costs/design_costs.py
@@ -61,7 +61,6 @@ def update_simulation_data(self, initial_conditions):
if "Time [s]" not in solution:
raise ValueError("The solution does not contain time data.")
self.problem._time_data = solution["Time [s]"]
- self.problem._capacity_data = solution["Discharge capacity [A.h]"]
self.problem._target = {key: solution[key] for key in self.problem.signal}
self.dt = solution["Time [s]"][1] - solution["Time [s]"][0]
diff --git a/pybop/costs/fitting_costs.py b/pybop/costs/fitting_costs.py
index eb21001a..3e734911 100644
--- a/pybop/costs/fitting_costs.py
+++ b/pybop/costs/fitting_costs.py
@@ -39,16 +39,14 @@ def _evaluate(self, x, grad=None):
"""
prediction = self.problem.evaluate(x)
- for key in prediction:
- if key not in ["Time [s]", "Discharge capacity [A.h]"]:
- if len(prediction.get(key, [])) != len(self._target.get(key, [])):
- return np.float64(np.inf) # prediction doesn't match target
+ for key in self.signal:
+ if len(prediction.get(key, [])) != len(self._target.get(key, [])):
+ return np.float64(np.inf) # prediction doesn't match target
e = np.array(
[
np.sqrt(np.mean((prediction[signal] - self._target[signal]) ** 2))
- for signal in prediction
- if signal not in ["Time [s]", "Discharge capacity [A.h]"]
+ for signal in self.signal
]
)
@@ -79,20 +77,13 @@ def _evaluateS1(self, x):
"""
y, dy = self.problem.evaluateS1(x)
- for key in y:
- if key not in ["Time [s]", "Discharge capacity [A.h]"]:
- if len(y.get(key, [])) != len(self._target.get(key, [])):
- e = np.float64(np.inf)
- de = self._de * np.ones(self.n_parameters)
- return e, de
+ for key in self.signal:
+ if len(y.get(key, [])) != len(self._target.get(key, [])):
+ e = np.float64(np.inf)
+ de = self._de * np.ones(self.n_parameters)
+ return e, de
- r = np.array(
- [
- y[signal] - self._target[signal]
- for signal in y
- if signal not in ["Time [s]", "Discharge capacity [A.h]"]
- ]
- )
+ r = np.array([y[signal] - self._target[signal] for signal in self.signal])
if self.n_outputs == 1:
r = r.reshape(self.problem.n_time_data)
@@ -156,16 +147,14 @@ def _evaluate(self, x, grad=None):
"""
prediction = self.problem.evaluate(x)
- for key in prediction:
- if key not in ["Time [s]", "Discharge capacity [A.h]"]:
- if len(prediction.get(key, [])) != len(self._target.get(key, [])):
- return np.float64(np.inf) # prediction doesn't match target
+ for key in self.signal:
+ if len(prediction.get(key, [])) != len(self._target.get(key, [])):
+ return np.float64(np.inf) # prediction doesn't match target
e = np.array(
[
np.sum(((prediction[signal] - self._target[signal]) ** 2))
- for signal in prediction
- if signal not in ["Time [s]", "Discharge capacity [A.h]"]
+ for signal in self.signal
]
)
if self.n_outputs == 1:
@@ -194,20 +183,13 @@ def _evaluateS1(self, x):
If an error occurs during the calculation of the cost or gradient.
"""
y, dy = self.problem.evaluateS1(x)
- for key in y:
- if key not in ["Time [s]", "Discharge capacity [A.h]"]:
- if len(y.get(key, [])) != len(self._target.get(key, [])):
- e = np.float64(np.inf)
- de = self._de * np.ones(self.n_parameters)
- return e, de
-
- r = np.array(
- [
- y[signal] - self._target[signal]
- for signal in y
- if signal not in ["Time [s]", "Discharge capacity [A.h]"]
- ]
- )
+ for key in self.signal:
+ if len(y.get(key, [])) != len(self._target.get(key, [])):
+ e = np.float64(np.inf)
+ de = self._de * np.ones(self.n_parameters)
+ return e, de
+
+ r = np.array([y[signal] - self._target[signal] for signal in self.signal])
if self.n_outputs == 1:
r = r.reshape(self.problem.n_time_data)
From 6c6494aa8dfb721e1a7fd0ed88f5615ece08df91 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Fri, 8 Mar 2024 19:47:56 +0000
Subject: [PATCH 37/64] Update spm_MLE example
---
examples/scripts/spm_MLE.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py
index 7aa4254a..6e822c4a 100644
--- a/examples/scripts/spm_MLE.py
+++ b/examples/scripts/spm_MLE.py
@@ -54,7 +54,7 @@
print("Estimated parameters:", x)
# Plot the timeseries output
-pybop.quick_plot(x[0:2], likelihood, title="Optimised Comparison")
+pybop.quick_plot(problem, parameter_values=x[0:2], title="Optimised Comparison")
# Plot convergence
pybop.plot_convergence(optim)
@@ -65,6 +65,6 @@
# Plot the cost landscape
pybop.plot_cost2d(likelihood, steps=15)
-# Plot the cost landscape with optimisation path and updated bounds
+# Plot the cost landscape with optimisation path
bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
-pybop.plot_cost2d(likelihood, optim=optim, bounds=bounds, steps=15)
+pybop.plot_optim2d(optim, bounds=bounds, steps=15)
From 9b0373477eacd7ca84286dee38d7dea2eac99de4 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Wed, 13 Mar 2024 13:00:40 +0000
Subject: [PATCH 38/64] Updt. cost2d/optim2d x0 shape/colour, revert conftest
win platform unicode, add infeasible unit test
---
conftest.py | 5 ----
pybop/plotting/plot_convergence.py | 30 ++++++++++-----------
tests/integration/test_parameterisations.py | 4 +--
tests/unit/test_optimisation.py | 11 ++++++++
4 files changed, 28 insertions(+), 22 deletions(-)
diff --git a/conftest.py b/conftest.py
index d8e79fcd..3641cbd1 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1,7 +1,6 @@
import pytest
import matplotlib
import plotly
-import sys
plotly.io.renderers.default = None
matplotlib.use("Template")
@@ -43,10 +42,6 @@ def pytest_configure(config):
config.addinivalue_line("markers", "plots: mark test as a plot test")
config.addinivalue_line("markers", "notebook: mark test as a notebook test")
- if sys.platform.startswith("win"):
- # Set the output encoding to UTF-8 on Windows
- sys.stdout = open(sys.stdout.fileno(), mode="w", encoding="utf8", buffering=1)
-
def pytest_collection_modifyitems(config, items):
options = {
diff --git a/pybop/plotting/plot_convergence.py b/pybop/plotting/plot_convergence.py
index b5963179..53324b57 100644
--- a/pybop/plotting/plot_convergence.py
+++ b/pybop/plotting/plot_convergence.py
@@ -98,35 +98,35 @@ def plot_optim2d(optim, bounds=None, steps=10, show=True, **layout_kwargs):
# Import plotly only when needed
go = pybop.PlotlyManager().go
- # Plot the initial guess
+ # Plot the optimisation trace
+ optim_trace = np.array([item for sublist in optim.log for item in sublist])
+ optim_trace = optim_trace.reshape(-1, 2)
fig.add_trace(
go.Scatter(
- x=[optim.x0[0]],
- y=[optim.x0[1]],
+ x=optim_trace[:, 0],
+ y=optim_trace[:, 1],
mode="markers",
- marker_symbol="x",
marker=dict(
- color="red",
- line_color="midnightblue",
- line_width=1,
- size=12,
+ color=[i / len(optim_trace) for i in range(len(optim_trace))],
+ colorscale="YlOrBr",
showscale=False,
),
showlegend=False,
)
)
- # Plot the optimisation trace
- optim_trace = np.array([item for sublist in optim.log for item in sublist])
- optim_trace = optim_trace.reshape(-1, 2)
+ # Plot the initial guess
fig.add_trace(
go.Scatter(
- x=optim_trace[:, 0],
- y=optim_trace[:, 1],
+ x=[optim.x0[0]],
+ y=[optim.x0[1]],
mode="markers",
+ marker_symbol="circle",
marker=dict(
- color=[i / len(optim_trace) for i in range(len(optim_trace))],
- colorscale="YlOrBr",
+ color="mediumspringgreen",
+ line_color="mediumspringgreen",
+ line_width=1,
+ size=14,
showscale=False,
),
showlegend=False,
diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index 33ad4f36..0aecf615 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -91,7 +91,7 @@ def test_spm_optimisers(self, optimiser, spm_costs):
if optimiser in [pybop.CMAES]:
parameterisation.set_f_guessed_tracking(True)
- parameterisation.cost.problem._model.allow_infeasible_solutions = False
+ parameterisation.cost.problem.model.allow_infeasible_solutions = False
assert parameterisation._use_f_guessed is True
parameterisation.set_max_iterations(1)
x, final_cost = parameterisation.run()
@@ -115,7 +115,7 @@ def test_spm_optimisers(self, optimiser, spm_costs):
x, final_cost = parameterisation.run()
elif optimiser in [pybop.SciPyMinimize]:
- parameterisation.cost.problem._model.allow_infeasible_solutions = False
+ parameterisation.cost.problem.model.allow_infeasible_solutions = False
x, final_cost = parameterisation.run()
else:
diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py
index 6569d1ad..7e1a4b10 100644
--- a/tests/unit/test_optimisation.py
+++ b/tests/unit/test_optimisation.py
@@ -122,6 +122,17 @@ def test_halting(self, cost):
with pytest.raises(ValueError):
optim.set_max_unchanged_iterations(1, threshold=-1)
+ @pytest.mark.unit
+ def test_infeasible_solutions(self, cost):
+ # Test infeasible solutions
+ for optimiser in [pybop.SciPyMinimize, pybop.GradientDescent]:
+ optim = pybop.Optimisation(
+ cost=cost, optimiser=optimiser, allow_infeasible_solutions=False
+ )
+ optim.set_max_iterations(1)
+ optim.run()
+ assert optim._iterations == 1
+
@pytest.mark.unit
def test_unphysical_result(self, cost):
# Trigger parameters not physically viable warning
From e7aef79b6b102bf0ca67096f1bee6f20f36a09fb Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Wed, 13 Mar 2024 13:43:23 +0000
Subject: [PATCH 39/64] Updt SciPy & BaseOptimiser for maximum iterations limit
- fixes #237
---
pybop/_optimisation.py | 5 +++--
pybop/optimisers/base_optimiser.py | 2 +-
pybop/optimisers/scipy_optimisers.py | 16 ++++------------
3 files changed, 8 insertions(+), 15 deletions(-)
diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py
index d5114b28..04e0b4ad 100644
--- a/pybop/_optimisation.py
+++ b/pybop/_optimisation.py
@@ -175,15 +175,16 @@ def _run_pybop(self):
final_cost : float
The final cost associated with the best parameters.
"""
- x, final_cost = self.optimiser.optimise(
+ result = self.optimiser.optimise(
cost_function=self.cost,
x0=self.x0,
bounds=self.bounds,
maxiter=self._max_iterations,
)
self.log = self.optimiser.log
+ self._iterations = result.nit
- return x, final_cost
+ return result.x, self.cost(result.x)
def _run_pints(self):
"""
diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py
index 29cc219a..f8796bc9 100644
--- a/pybop/optimisers/base_optimiser.py
+++ b/pybop/optimisers/base_optimiser.py
@@ -38,7 +38,7 @@ def optimise(self, cost_function, x0=None, bounds=None, maxiter=None):
self.cost_function = cost_function
self.x0 = x0
self.bounds = bounds
- self.maxiter = maxiter
+ self._max_iterations = maxiter
# Run optimisation
result = self._runoptimise(self.cost_function, x0=self.x0, bounds=self.bounds)
diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py
index f4ea49a7..5fc12ea6 100644
--- a/pybop/optimisers/scipy_optimisers.py
+++ b/pybop/optimisers/scipy_optimisers.py
@@ -80,7 +80,7 @@ def cost_wrapper(x):
else:
self.options.pop("maxiter", None)
- output = minimize(
+ result = minimize(
cost_wrapper,
x0,
method=self.method,
@@ -89,11 +89,7 @@ def cost_wrapper(x):
callback=callback,
)
- # Get performance statistics
- x = output.x
- final_cost = cost_function(x)
-
- return x, final_cost
+ return result
def needs_sensitivities(self):
"""
@@ -182,7 +178,7 @@ def callback(x, convergence):
(lower, upper) for lower, upper in zip(bounds["lower"], bounds["upper"])
]
- output = differential_evolution(
+ result = differential_evolution(
cost_function,
bounds,
strategy=self.strategy,
@@ -191,11 +187,7 @@ def callback(x, convergence):
callback=callback,
)
- # Get performance statistics
- x = output.x
- final_cost = output.fun
-
- return x, final_cost
+ return result
def set_population_size(self, population_size=None):
"""
From afd4990f84ff4acfd0d07a944beea270df9c118e Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Wed, 13 Mar 2024 15:25:37 +0000
Subject: [PATCH 40/64] add infeasible cost tests, remove redundant
scipyminimise maxiter options check
---
pybop/optimisers/scipy_optimisers.py | 13 ++++---------
tests/unit/test_cost.py | 2 +-
2 files changed, 5 insertions(+), 10 deletions(-)
diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py
index 5fc12ea6..010c653b 100644
--- a/pybop/optimisers/scipy_optimisers.py
+++ b/pybop/optimisers/scipy_optimisers.py
@@ -49,9 +49,10 @@ def _runoptimise(self, cost_function, x0, bounds):
A tuple (x, final_cost) containing the optimized parameters and the value of `cost_function` at the optimum.
"""
- # Add callback storing history of parameter values
self.log = [[x0]]
+ self.options = {"maxiter": self._max_iterations}
+ # Add callback storing history of parameter values
def callback(x):
self.log.append([x])
@@ -74,12 +75,6 @@ def cost_wrapper(x):
(lower, upper) for lower, upper in zip(bounds["lower"], bounds["upper"])
)
- # Set max iterations
- if self._max_iterations is not None:
- self.options = {"maxiter": self._max_iterations}
- else:
- self.options.pop("maxiter", None)
-
result = minimize(
cost_wrapper,
x0,
@@ -158,6 +153,8 @@ def _runoptimise(self, cost_function, x0=None, bounds=None):
A tuple (x, final_cost) containing the optimized parameters and the value of ``cost_function`` at the optimum.
"""
+ self.log = []
+
if bounds is None:
raise ValueError("Bounds must be specified for differential_evolution.")
@@ -167,8 +164,6 @@ def _runoptimise(self, cost_function, x0=None, bounds=None):
)
# Add callback storing history of parameter values
- self.log = []
-
def callback(x, convergence):
self.log.append([x])
diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py
index 7d3a4ea6..b6f4daf3 100644
--- a/tests/unit/test_cost.py
+++ b/tests/unit/test_cost.py
@@ -153,10 +153,10 @@ def test_costs(self, cost):
for i in range(len(record)):
assert "Non-physical point encountered" in str(record[i].message)
- if isinstance(cost, pybop.RootMeanSquaredError):
# Test infeasible locations
cost.problem._model.allow_infeasible_solutions = False
assert cost([1.1]) == np.inf
+ assert cost.evaluateS1([1.1]) == (np.inf, cost._de)
# Test exception for non-numeric inputs
with pytest.raises(ValueError):
From db284402100e0b1bb8c3ed0d4b6dad98f849ed8f Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 15 Mar 2024 10:08:06 +0000
Subject: [PATCH 41/64] Updt grad descent hypers for likelihood tests, add tol
arg to scipy optimisers, pass optimiser final cost as is
---
pybop/_optimisation.py | 8 ++++----
pybop/costs/_likelihoods.py | 2 +-
pybop/optimisers/scipy_optimisers.py | 10 ++++++++--
tests/integration/test_parameterisations.py | 12 ++++++++----
4 files changed, 21 insertions(+), 11 deletions(-)
diff --git a/pybop/_optimisation.py b/pybop/_optimisation.py
index b0a81905..a7bd6732 100644
--- a/pybop/_optimisation.py
+++ b/pybop/_optimisation.py
@@ -156,8 +156,6 @@ def run(self):
x, final_cost = self._run_pints()
elif not self.pints:
x, final_cost = self._run_pybop()
- if not self._minimising:
- final_cost = -final_cost
# Store the optimised parameters
if self.cost.problem is not None:
@@ -374,8 +372,10 @@ def _run_pints(self):
# Store the optimised parameters
self.store_optimised_parameters(x)
- # Return best position and score
- return x, f if self._minimising else -f
+ # Return best position and the score used internally,
+ # i.e the negative log-likelihood in the case of
+ # self._minimising = False
+ return x, f
def f_guessed_tracking(self):
"""
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 23953573..5af2805a 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -96,7 +96,7 @@ def _evaluateS1(self, x, grad=None):
for key in self.signal:
if len(y.get(key, [])) != len(self._target.get(key, [])):
likelihood = np.float64(np.inf)
- dl = self._de * np.ones(self.n_parameters)
+ dl = self._dl * np.ones(self.n_parameters)
return -likelihood, -dl
r = np.array([self._target[signal] - y[signal] for signal in self.signal])
diff --git a/pybop/optimisers/scipy_optimisers.py b/pybop/optimisers/scipy_optimisers.py
index a3bac83d..d3a6177c 100644
--- a/pybop/optimisers/scipy_optimisers.py
+++ b/pybop/optimisers/scipy_optimisers.py
@@ -20,10 +20,11 @@ class SciPyMinimize(BaseOptimiser):
Maximum number of iterations to perform.
"""
- def __init__(self, method=None, bounds=None, maxiter=None):
+ def __init__(self, method=None, bounds=None, maxiter=None, tol=1e-5):
super().__init__()
self.method = method
self.bounds = bounds
+ self.tol = tol
self.options = {}
self._max_iterations = maxiter
@@ -79,6 +80,7 @@ def cost_wrapper(x):
x0,
method=self.method,
bounds=bounds,
+ tol=self.tol,
options=self.options,
callback=callback,
)
@@ -126,8 +128,11 @@ class SciPyDifferentialEvolution(BaseOptimiser):
The number of individuals in the population. Defaults to 15.
"""
- def __init__(self, bounds=None, strategy="best1bin", maxiter=1000, popsize=15):
+ def __init__(
+ self, bounds=None, strategy="best1bin", maxiter=1000, popsize=15, tol=1e-5
+ ):
super().__init__()
+ self.tol = tol
self.strategy = strategy
self._max_iterations = maxiter
self._population_size = popsize
@@ -178,6 +183,7 @@ def callback(x, convergence):
strategy=self.strategy,
maxiter=self._max_iterations,
popsize=self._population_size,
+ tol=self.tol,
callback=callback,
)
diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index c9ac02b0..73c87de4 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -70,7 +70,7 @@ def spm_costs(self, model, parameters, cost_class, init_soc):
model, parameters, dataset, signal=signal, init_soc=init_soc
)
if cost_class in [pybop.GaussianLogLikelihoodKnownSigma]:
- return cost_class(problem, sigma=[0.05, 0.05])
+ return cost_class(problem, sigma=[0.03, 0.03])
else:
return cost_class(problem)
@@ -123,7 +123,11 @@ def test_spm_optimisers(self, optimiser, spm_costs):
assert parameterisation._max_iterations == 125
elif optimiser in [pybop.GradientDescent]:
- parameterisation.optimiser.set_learning_rate(0.02)
+ if isinstance(spm_costs, pybop.GaussianLogLikelihoodKnownSigma):
+ parameterisation.optimiser.set_learning_rate(1.8e-5)
+ parameterisation.set_min_iterations(150)
+ else:
+ parameterisation.optimiser.set_learning_rate(0.02)
parameterisation.set_max_iterations(150)
x, final_cost = parameterisation.run()
@@ -196,9 +200,9 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost):
# Test each optimiser
parameterisation = pybop.Optimisation(
- cost=spm_two_signal_cost, optimiser=multi_optimiser, sigma0=0.05
+ cost=spm_two_signal_cost, optimiser=multi_optimiser, sigma0=0.03
)
- parameterisation.set_max_unchanged_iterations(iterations=15, threshold=5e-4)
+ parameterisation.set_max_unchanged_iterations(iterations=35, threshold=5e-4)
parameterisation.set_max_iterations(125)
initial_cost = parameterisation.cost(spm_two_signal_cost.x0)
From 61d7d7ae5d6e0d1a385a6db020ad31b947fc2cec Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 15 Mar 2024 10:42:38 +0000
Subject: [PATCH 42/64] Split kaleido dependency to avoid windows hang
---
pyproject.toml | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 36eb03a4..311e71c0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,7 +26,12 @@ dependencies = [
]
[project.optional-dependencies]
-plot = ["plotly>=5.0", "kaleido>=0.2"]
+# Split kaleido into two dependencies to avoid Windows hang
+# See: https://github.com/plotly/Kaleido/issues/110
+plot = ["plotly>=5.0",
+ "kaleido==0.1.0.post1; sys_platform == 'win32'",
+ "kaleido>=0.2; sys_platform != 'win32'",
+]
docs = [
"pydata-sphinx-theme",
"sphinx>=6",
From 744d1663f32db734653c31d6970da62636ae21a0 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 15 Mar 2024 12:08:31 +0000
Subject: [PATCH 43/64] small refactors and cleanup
---
pybop/_problem.py | 12 +++++++++---
pybop/costs/_likelihoods.py | 11 ++++-------
pybop/costs/design_costs.py | 6 +++---
pybop/models/lithium_ion/echem_base.py | 6 ++----
4 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/pybop/_problem.py b/pybop/_problem.py
index 3fa07ec6..de01c600 100644
--- a/pybop/_problem.py
+++ b/pybop/_problem.py
@@ -49,7 +49,7 @@ def __init__(
self._time_data = None
self._target = None
- if isinstance(model, (pybop.BaseModel, pybop.lithium_ion.EChemBaseModel)):
+ if isinstance(model, pybop.BaseModel):
self.additional_variables = additional_variables
else:
self.additional_variables = []
@@ -186,7 +186,10 @@ def __init__(
init_soc=None,
x0=None,
):
- additional_variables += ["Time [s]"]
+ # Add time and remove duplicates
+ additional_variables.extend(["Time [s]"])
+ additional_variables = list(set(additional_variables))
+
super().__init__(
parameters, model, check_model, signal, additional_variables, init_soc, x0
)
@@ -308,7 +311,10 @@ def __init__(
init_soc=None,
x0=None,
):
- additional_variables += ["Time [s]", "Current [A]"]
+ # Add time and current and remove duplicates
+ additional_variables.extend(["Time [s]", "Current [A]"])
+ additional_variables = list(set(additional_variables))
+
super().__init__(
parameters, model, check_model, signal, additional_variables, init_soc, x0
)
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 5af2805a..51254f7f 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -91,7 +91,6 @@ def _evaluateS1(self, x, grad=None):
Calls the problem.evaluateS1 method and calculates
the log-likelihood
"""
-
y, dy = self.problem.evaluateS1(x)
for key in self.signal:
if len(y.get(key, [])) != len(self._target.get(key, [])):
@@ -100,16 +99,15 @@ def _evaluateS1(self, x, grad=None):
return -likelihood, -dl
r = np.array([self._target[signal] - y[signal] for signal in self.signal])
+ likelihood = self._evaluate(x)
if self.n_outputs == 1:
r = r.reshape(self.problem.n_time_data)
dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
- likelihood = self._evaluate(x)
dl = self.sigma2 * np.sum((r * dy), axis=1)
return likelihood, dl
else:
r = r.reshape(self.n_outputs, self.problem.n_time_data)
- likelihood = self._evaluate(x)
dl = self.sigma2 * np.sum((r[:, :, np.newaxis] * dy), axis=1)
return likelihood, np.sum(dl, axis=1)
@@ -176,28 +174,27 @@ def _evaluateS1(self, x, grad=None):
"""
sigma = np.asarray(x[-self.n_outputs :])
if np.any(sigma <= 0):
- return -np.float64(np.inf), self._de * np.ones(self.n_parameters)
+ return -np.float64(np.inf), -self._dl * np.ones(self.n_parameters)
y, dy = self.problem.evaluateS1(x[: -self.n_outputs])
for key in self.signal:
if len(y.get(key, [])) != len(self._target.get(key, [])):
likelihood = np.float64(np.inf)
- dl = self._de * np.ones(self.n_parameters)
+ dl = self._dl * np.ones(self.n_parameters)
return -likelihood, -dl
r = np.array([self._target[signal] - y[signal] for signal in self.signal])
+ likelihood = self._evaluate(x)
if self.n_outputs == 1:
r = r.reshape(self.problem.n_time_data)
dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
- likelihood = self._evaluate(x)
dl = sigma ** (-2.0) * np.sum((r * dy), axis=1)
dsigma = -self._n_times / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
dl = np.concatenate((dl, dsigma))
return likelihood, dl
else:
r = r.reshape(self.n_outputs, self.problem.n_time_data)
- likelihood = self._evaluate(x)
dl = sigma ** (-2.0) * np.sum((r[:, :, np.newaxis] * dy), axis=1)
dsigma = -self._n_times / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
dl = np.concatenate((dl, dsigma))
diff --git a/pybop/costs/design_costs.py b/pybop/costs/design_costs.py
index e1fb38cb..16dd8a0f 100644
--- a/pybop/costs/design_costs.py
+++ b/pybop/costs/design_costs.py
@@ -1,7 +1,7 @@
-import pybop
import numpy as np
import warnings
+from pybop import is_numeric
from pybop.costs.base_cost import BaseCost
@@ -114,7 +114,7 @@ def _evaluate(self, x, grad=None):
float
The negative gravimetric energy density or infinity in case of infeasible parameters.
"""
- if not all(pybop.is_numeric(i) for i in x):
+ if not all(is_numeric(i) for i in x):
raise ValueError("Input must be a numeric array.")
try:
@@ -173,7 +173,7 @@ def _evaluate(self, x, grad=None):
float
The negative volumetric energy density or infinity in case of infeasible parameters.
"""
- if not all(pybop.is_numeric(i) for i in x):
+ if not all(is_numeric(i) for i in x):
raise ValueError("Input must be a numeric array.")
try:
with warnings.catch_warnings():
diff --git a/pybop/models/lithium_ion/echem_base.py b/pybop/models/lithium_ion/echem_base.py
index 6a642ff3..c4664a31 100644
--- a/pybop/models/lithium_ion/echem_base.py
+++ b/pybop/models/lithium_ion/echem_base.py
@@ -227,10 +227,8 @@ def approximate_capacity(self, x):
average_voltage = positive_electrode_ocp(
mean_sto_pos
) - negative_electrode_ocp(mean_sto_neg)
- except TypeError:
- average_voltage = positive_electrode_ocp([mean_sto_pos]).evaluate()[0][
- 0
- ] - negative_electrode_ocp(mean_sto_neg) # Super hacky, needs to be fixed
+ except Exception as e:
+ raise ValueError(f"Error in average voltage calculation: {e}")
# Calculate and update nominal capacity
theoretical_capacity = theoretical_energy / average_voltage
From c1b38546872bbe2c3200b3a0bc4f90e6711d22bc Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Fri, 15 Mar 2024 12:33:52 +0000
Subject: [PATCH 44/64] Updt changelog
---
CHANGELOG.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7bfc4528..043f2d30 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,8 @@
## Features
+- [#198](https://github.com/pybop-team/PyBOP/pull/198) - Adds default subplot trace options, removes `[]` in axis plots as per SI standard, adds varying signal length to quick_plot, restores design optimisation exception.
+- [#224](https://github.com/pybop-team/PyBOP/pull/224) - Updated prediction objects to dictionaries, cost class calculations, added `additional_variables` argument to problem class, updated scipy.minimize default method to Nelder-Mead, added gradient cost landscape plots with optional argument.
- [#218](https://github.com/pybop-team/PyBOP/pull/218) - Adds likelihood base class, `GaussianLogLikelihoodKnownSigma`, `GaussianLogLikelihood`, and `ProbabilityBased` cost function. As well as addition of a maximum likelihood estimation (MLE) example.
- [#185](https://github.com/pybop-team/PyBOP/pull/185) - Adds a pull request template, additional nox sessions `quick` for standard tests + docs, `pre-commit` for pre-commit, `test` to run all standard tests, `doctest` for docs.
- [#215](https://github.com/pybop-team/PyBOP/pull/215) - Adds `release_workflow.md` and updates `release_action.yaml`
From 716c671733b8dc70244e9d4caa478df0fa12e118 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Tue, 19 Mar 2024 12:08:27 +0000
Subject: [PATCH 45/64] updt coverage, bugfix sigma check/wrap
---
examples/standalone/problem.py | 4 ++--
pybop/costs/_likelihoods.py | 10 +++++-----
tests/unit/test_cost.py | 1 +
tests/unit/test_likelihoods.py | 11 ++++++-----
4 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/examples/standalone/problem.py b/examples/standalone/problem.py
index bc9cd31d..45f5a602 100644
--- a/examples/standalone/problem.py
+++ b/examples/standalone/problem.py
@@ -14,12 +14,12 @@ def __init__(
model=None,
check_model=True,
signal=None,
- default_variables=None,
+ additional_variables=None,
init_soc=None,
x0=None,
):
super().__init__(
- parameters, model, check_model, signal, default_variables, init_soc, x0
+ parameters, model, check_model, signal, additional_variables, init_soc, x0
)
self._dataset = dataset.data
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 51254f7f..25408db4 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -16,11 +16,11 @@ def set_sigma(self, sigma):
Setter for sigma parameter
"""
- if sigma is not type(np.array([])):
- try:
- sigma = np.array(sigma)
- except Exception:
- raise ValueError("Sigma must be a numpy array")
+ if not isinstance(sigma, np.ndarray):
+ sigma = np.array([sigma])
+
+ if not np.isreal(sigma).all():
+ raise ValueError("Sigma must contain only numeric values")
if np.any(sigma <= 0):
raise ValueError("Sigma must not be negative")
diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py
index 693e3944..7dc81eba 100644
--- a/tests/unit/test_cost.py
+++ b/tests/unit/test_cost.py
@@ -195,6 +195,7 @@ def test_energy_density_costs(
assert cost([0.4]) <= 0 # Should be a viable design
assert cost([0.8]) == np.inf # Should exceed active material + porosity < 1
assert cost([1.4]) == np.inf # Definitely not viable
+ assert cost([-0.1]) == np.inf # Should not be a viable design
# Test infeasible locations
cost.problem._model.allow_infeasible_solutions = False
diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py
index 02db1e87..2aeb8ed7 100644
--- a/tests/unit/test_likelihoods.py
+++ b/tests/unit/test_likelihoods.py
@@ -74,6 +74,8 @@ def test_base_likelihood_init(self, problem):
assert likelihood.bounds == problem.bounds
assert likelihood._n_parameters == 1
assert np.array_equal(likelihood._target, problem._target)
+ with pytest.raises(ValueError):
+ likelihood.set_sigma("Test")
@pytest.mark.unit
def test_base_likelihood_call_raises_not_implemented_error(self, problem):
@@ -126,9 +128,8 @@ def test_gaussian_log_likelihood(self, problem):
assert np.all(grad_likelihood <= 0)
@pytest.mark.unit
- def test_gaussian_log_likelihood_call_returns_negative_inf_for_non_positive_sigma(
- self, problem
- ):
+ def test_gaussian_log_likelihood_call_returns_negative_inf(self, problem):
likelihood = pybop.GaussianLogLikelihood(problem)
- result = likelihood(np.array([-0.5]))
- assert result == -np.inf
+ assert likelihood(np.array([-0.5])) == -np.inf # negative value
+ with pytest.raises(ValueError):
+ assert likelihood(np.array([0.3])) == -np.inf # parameter value too small
From a479136c2bdf8dbca25d67c7d8717f079b792c79 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Tue, 19 Mar 2024 15:02:20 +0000
Subject: [PATCH 46/64] coverage, bugfix model.simulateS1
---
pybop/costs/_likelihoods.py | 16 ++++++++--------
pybop/models/base_model.py | 4 +++-
tests/unit/test_cost.py | 2 ++
tests/unit/test_likelihoods.py | 23 +++++++++++++++++++----
4 files changed, 32 insertions(+), 13 deletions(-)
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 93fc46e6..dc045f0b 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -64,18 +64,17 @@ def _evaluate(self, x, grad=None):
Calls the problem.evaluate method and calculates
the log-likelihood
"""
- prediction = self.problem.evaluate(x)
+ y = self.problem.evaluate(x)
for key in self.signal:
- if len(prediction.get(key, [])) != len(self._target.get(key, [])):
+ if len(y.get(key, [])) != len(self._target.get(key, [])):
return -np.float64(np.inf) # prediction doesn't match target
e = np.array(
[
np.sum(
self._offset
- + self._multip
- * np.sum((self._target[signal] - prediction[signal]) ** 2)
+ + self._multip * np.sum((self._target[signal] - y[signal]) ** 2)
)
for signal in self.signal
]
@@ -92,6 +91,7 @@ def _evaluateS1(self, x, grad=None):
the log-likelihood
"""
y, dy = self.problem.evaluateS1(x)
+
for key in self.signal:
if len(y.get(key, [])) != len(self._target.get(key, [])):
likelihood = np.float64(np.inf)
@@ -144,10 +144,10 @@ def _evaluate(self, x, grad=None):
if np.any(sigma <= 0):
return -np.inf
- prediction = self.problem.evaluate(x[: -self.n_outputs])
+ y = self.problem.evaluate(x[: -self.n_outputs])
for key in self.signal:
- if len(prediction.get(key, [])) != len(self._target.get(key, [])):
+ if len(y.get(key, [])) != len(self._target.get(key, [])):
return -np.float64(np.inf) # prediction doesn't match target
e = np.array(
@@ -155,8 +155,7 @@ def _evaluate(self, x, grad=None):
np.sum(
self._logpi
- self._n_times * np.log(sigma)
- - np.sum((self._target[signal] - prediction[signal]) ** 2)
- / (2.0 * sigma**2)
+ - np.sum((self._target[signal] - y[signal]) ** 2) / (2.0 * sigma**2)
)
for signal in self.signal
]
@@ -173,6 +172,7 @@ def _evaluateS1(self, x, grad=None):
the log-likelihood
"""
sigma = np.asarray(x[-self.n_outputs :])
+
if np.any(sigma <= 0):
return -np.float64(np.inf), -self._dl * np.ones(self.n_parameters)
diff --git a/pybop/models/base_model.py b/pybop/models/base_model.py
index cde839b0..0816cc21 100644
--- a/pybop/models/base_model.py
+++ b/pybop/models/base_model.py
@@ -410,7 +410,9 @@ def simulateS1(self, inputs, t_eval):
for signal in self.signal
for key in self.fit_keys
]
- ).reshape(self.n_parameters, self.n_time_data, self.n_outputs)
+ ).reshape(
+ self.n_parameters, sol[self.signal[0]].data.shape[0], self.n_outputs
+ )
return y, dy
diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py
index 7dc81eba..69abef2a 100644
--- a/tests/unit/test_cost.py
+++ b/tests/unit/test_cost.py
@@ -161,6 +161,8 @@ def test_costs(self, cost):
cost.problem._model.allow_infeasible_solutions = False
assert cost([1.1]) == np.inf
assert cost.evaluateS1([1.1]) == (np.inf, cost._de)
+ assert cost([0.01]) == np.inf
+ assert cost.evaluateS1([0.01]) == (np.inf, cost._de)
# Test exception for non-numeric inputs
with pytest.raises(ValueError):
diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py
index 2aeb8ed7..ed29f196 100644
--- a/tests/unit/test_likelihoods.py
+++ b/tests/unit/test_likelihoods.py
@@ -128,8 +128,23 @@ def test_gaussian_log_likelihood(self, problem):
assert np.all(grad_likelihood <= 0)
@pytest.mark.unit
- def test_gaussian_log_likelihood_call_returns_negative_inf(self, problem):
+ def test_gaussian_log_likelihood_returns_negative_inf(self, problem):
likelihood = pybop.GaussianLogLikelihood(problem)
- assert likelihood(np.array([-0.5])) == -np.inf # negative value
- with pytest.raises(ValueError):
- assert likelihood(np.array([0.3])) == -np.inf # parameter value too small
+ assert likelihood(np.array([-0.5, -0.5])) == -np.inf # negative sigma value
+ assert (
+ likelihood.evaluateS1(np.array([-0.5, -0.5]))[0] == -np.inf
+ ) # negative sigma value
+ assert likelihood(np.array([0.01, 0.1])) == -np.inf # parameter value too small
+ assert (
+ likelihood.evaluateS1(np.array([0.01, 0.1]))[0] == -np.inf
+ ) # parameter value too small
+
+ @pytest.mark.unit
+ def test_gaussian_log_likelihood_known_sigma_returns_negative_inf(self, problem):
+ likelihood = pybop.GaussianLogLikelihoodKnownSigma(
+ problem, sigma=np.array([0.2])
+ )
+ assert likelihood(np.array([0.01])) == -np.inf # parameter value too small
+ assert (
+ likelihood.evaluateS1(np.array([0.01]))[0] == -np.inf
+ ) # parameter value too small
From 18a1a4e331a36eb99d3e330ad545d0b5bff05f1d Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 11:55:49 +0000
Subject: [PATCH 47/64] Update to n_time_data and add two_signal test
---
pybop/costs/_likelihoods.py | 12 +++---
tests/unit/test_likelihoods.py | 68 +++++++++++++++++++++-------------
2 files changed, 48 insertions(+), 32 deletions(-)
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index dc045f0b..7a01bb10 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -9,7 +9,7 @@ class BaseLikelihood(BaseCost):
def __init__(self, problem, sigma=None):
super(BaseLikelihood, self).__init__(problem, sigma)
- self._n_times = problem.n_time_data
+ self.n_time_data = problem.n_time_data
def set_sigma(self, sigma):
"""
@@ -54,7 +54,7 @@ def __init__(self, problem, sigma):
super(GaussianLogLikelihoodKnownSigma, self).__init__(problem, sigma)
if sigma is not None:
self.set_sigma(sigma)
- self._offset = -0.5 * self._n_times * np.log(2 * np.pi / self.sigma0)
+ self._offset = -0.5 * self.n_time_data * np.log(2 * np.pi / self.sigma0)
self._multip = -1 / (2.0 * self.sigma0**2)
self.sigma2 = self.sigma0**-2
self._dl = np.ones(self._n_parameters)
@@ -124,7 +124,7 @@ class GaussianLogLikelihood(BaseLikelihood):
def __init__(self, problem):
super(GaussianLogLikelihood, self).__init__(problem)
- self._logpi = -0.5 * self._n_times * np.log(2 * np.pi)
+ self._logpi = -0.5 * self.n_time_data * np.log(2 * np.pi)
self._dl = np.ones(self._n_parameters + self.n_outputs)
def _evaluate(self, x, grad=None):
@@ -154,7 +154,7 @@ def _evaluate(self, x, grad=None):
[
np.sum(
self._logpi
- - self._n_times * np.log(sigma)
+ - self.n_time_data * np.log(sigma)
- np.sum((self._target[signal] - y[signal]) ** 2) / (2.0 * sigma**2)
)
for signal in self.signal
@@ -190,12 +190,12 @@ def _evaluateS1(self, x, grad=None):
r = r.reshape(self.problem.n_time_data)
dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
dl = sigma ** (-2.0) * np.sum((r * dy), axis=1)
- dsigma = -self._n_times / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
+ dsigma = -self.n_time_data / sigma + sigma ** -(3.0) * np.sum(r**2, axis=0)
dl = np.concatenate((dl, dsigma))
return likelihood, dl
else:
r = r.reshape(self.n_outputs, self.problem.n_time_data)
dl = sigma ** (-2.0) * np.sum((r[:, :, np.newaxis] * dy), axis=1)
- dsigma = -self._n_times / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
+ dsigma = -self.n_time_data / sigma + sigma ** -(3.0) * np.sum(r**2, axis=0)
dl = np.concatenate((dl, dsigma))
return likelihood, np.sum(dl, axis=1)
diff --git a/tests/unit/test_likelihoods.py b/tests/unit/test_likelihoods.py
index ed29f196..029f3440 100644
--- a/tests/unit/test_likelihoods.py
+++ b/tests/unit/test_likelihoods.py
@@ -53,22 +53,30 @@ def dataset(self, model, experiment, x0):
)
@pytest.fixture
- def signal(self):
- return "Voltage [V]"
+ def one_signal_problem(self, model, parameters, dataset, x0):
+ signal = ["Voltage [V]"]
+ return pybop.FittingProblem(
+ model, parameters, dataset, signal=signal, x0=x0, init_soc=1.0
+ )
- @pytest.fixture()
- def problem(self, model, parameters, dataset, signal, x0):
- problem = pybop.FittingProblem(
+ @pytest.fixture
+ def two_signal_problem(self, model, parameters, dataset, x0):
+ signal = ["Time [s]", "Voltage [V]"]
+ return pybop.FittingProblem(
model, parameters, dataset, signal=signal, x0=x0, init_soc=1.0
)
- return problem
+ @pytest.mark.parametrize(
+ "problem_name, n_outputs",
+ [("one_signal_problem", 1), ("two_signal_problem", 2)],
+ )
@pytest.mark.unit
- def test_base_likelihood_init(self, problem):
+ def test_base_likelihood_init(self, problem_name, n_outputs, request):
+ problem = request.getfixturevalue(problem_name)
likelihood = pybop.BaseLikelihood(problem, sigma=np.array([0.2]))
assert likelihood.problem == problem
- assert likelihood.n_outputs == 1
- assert likelihood._n_times == problem.n_time_data
+ assert likelihood.n_outputs == n_outputs
+ assert likelihood.n_time_data == problem.n_time_data
assert np.array_equal(likelihood.get_sigma(), np.array([0.2]))
assert likelihood.x0 == problem.x0
assert likelihood.bounds == problem.bounds
@@ -78,37 +86,43 @@ def test_base_likelihood_init(self, problem):
likelihood.set_sigma("Test")
@pytest.mark.unit
- def test_base_likelihood_call_raises_not_implemented_error(self, problem):
- likelihood = pybop.BaseLikelihood(problem)
+ def test_base_likelihood_call_raises_not_implemented_error(
+ self, one_signal_problem
+ ):
+ likelihood = pybop.BaseLikelihood(one_signal_problem)
with pytest.raises(NotImplementedError):
likelihood(np.array([0.5, 0.5]))
@pytest.mark.unit
- def test_base_likelihood_set_get_sigma(self, problem):
- likelihood = pybop.BaseLikelihood(problem)
+ def test_base_likelihood_set_get_sigma(self, one_signal_problem):
+ likelihood = pybop.BaseLikelihood(one_signal_problem)
likelihood.set_sigma(np.array([0.3]))
assert np.array_equal(likelihood.get_sigma(), np.array([0.3]))
@pytest.mark.unit
def test_base_likelihood_set_sigma_raises_value_error_for_negative_sigma(
- self, problem
+ self, one_signal_problem
):
- likelihood = pybop.BaseLikelihood(problem)
+ likelihood = pybop.BaseLikelihood(one_signal_problem)
with pytest.raises(ValueError):
likelihood.set_sigma(np.array([-0.2]))
@pytest.mark.unit
- def test_base_likelihood_get_n_parameters(self, problem):
- likelihood = pybop.BaseLikelihood(problem)
+ def test_base_likelihood_get_n_parameters(self, one_signal_problem):
+ likelihood = pybop.BaseLikelihood(one_signal_problem)
assert likelihood.get_n_parameters() == 1
@pytest.mark.unit
- def test_base_likelihood_n_parameters_property(self, problem):
- likelihood = pybop.BaseLikelihood(problem)
+ def test_base_likelihood_n_parameters_property(self, one_signal_problem):
+ likelihood = pybop.BaseLikelihood(one_signal_problem)
assert likelihood.n_parameters == 1
+ @pytest.mark.parametrize(
+ "problem_name", ["one_signal_problem", "two_signal_problem"]
+ )
@pytest.mark.unit
- def test_gaussian_log_likelihood_known_sigma(self, problem):
+ def test_gaussian_log_likelihood_known_sigma(self, problem_name, request):
+ problem = request.getfixturevalue(problem_name)
likelihood = pybop.GaussianLogLikelihoodKnownSigma(
problem, sigma=np.array([1.0])
)
@@ -119,8 +133,8 @@ def test_gaussian_log_likelihood_known_sigma(self, problem):
assert np.all(grad_likelihood <= 0)
@pytest.mark.unit
- def test_gaussian_log_likelihood(self, problem):
- likelihood = pybop.GaussianLogLikelihood(problem)
+ def test_gaussian_log_likelihood(self, one_signal_problem):
+ likelihood = pybop.GaussianLogLikelihood(one_signal_problem)
result = likelihood(np.array([0.5, 0.5]))
grad_result, grad_likelihood = likelihood.evaluateS1(np.array([0.5, 0.5]))
assert isinstance(result, float)
@@ -128,8 +142,8 @@ def test_gaussian_log_likelihood(self, problem):
assert np.all(grad_likelihood <= 0)
@pytest.mark.unit
- def test_gaussian_log_likelihood_returns_negative_inf(self, problem):
- likelihood = pybop.GaussianLogLikelihood(problem)
+ def test_gaussian_log_likelihood_returns_negative_inf(self, one_signal_problem):
+ likelihood = pybop.GaussianLogLikelihood(one_signal_problem)
assert likelihood(np.array([-0.5, -0.5])) == -np.inf # negative sigma value
assert (
likelihood.evaluateS1(np.array([-0.5, -0.5]))[0] == -np.inf
@@ -140,9 +154,11 @@ def test_gaussian_log_likelihood_returns_negative_inf(self, problem):
) # parameter value too small
@pytest.mark.unit
- def test_gaussian_log_likelihood_known_sigma_returns_negative_inf(self, problem):
+ def test_gaussian_log_likelihood_known_sigma_returns_negative_inf(
+ self, one_signal_problem
+ ):
likelihood = pybop.GaussianLogLikelihoodKnownSigma(
- problem, sigma=np.array([0.2])
+ one_signal_problem, sigma=np.array([0.2])
)
assert likelihood(np.array([0.01])) == -np.inf # parameter value too small
assert (
From 19b247c394146e97e2dec2f26d4ea7a909d30258 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 11:56:09 +0000
Subject: [PATCH 48/64] style: pre-commit fixes
---
pybop/costs/_likelihoods.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pybop/costs/_likelihoods.py b/pybop/costs/_likelihoods.py
index 7a01bb10..b681100a 100644
--- a/pybop/costs/_likelihoods.py
+++ b/pybop/costs/_likelihoods.py
@@ -190,12 +190,12 @@ def _evaluateS1(self, x, grad=None):
r = r.reshape(self.problem.n_time_data)
dy = dy.reshape(self.n_parameters, self.problem.n_time_data)
dl = sigma ** (-2.0) * np.sum((r * dy), axis=1)
- dsigma = -self.n_time_data / sigma + sigma ** -(3.0) * np.sum(r**2, axis=0)
+ dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
dl = np.concatenate((dl, dsigma))
return likelihood, dl
else:
r = r.reshape(self.n_outputs, self.problem.n_time_data)
dl = sigma ** (-2.0) * np.sum((r[:, :, np.newaxis] * dy), axis=1)
- dsigma = -self.n_time_data / sigma + sigma ** -(3.0) * np.sum(r**2, axis=0)
+ dsigma = -self.n_time_data / sigma + sigma**-(3.0) * np.sum(r**2, axis=0)
dl = np.concatenate((dl, dsigma))
return likelihood, np.sum(dl, axis=1)
From 695c2c980031142ff57d184fe3be346e28642099 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 12:39:10 +0000
Subject: [PATCH 49/64] Test standard plot and plot_trajectories
---
tests/unit/test_plots.py | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 8e793e49..bc53f663 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -8,6 +8,16 @@ class TestPlots:
A class to test the plotting classes.
"""
+ @pytest.mark.unit
+ def test_standard_plot(self):
+ # Test standard plot and removal of brackets
+ plot_dict = pybop.StandardPlot(
+ x=np.ones(10),
+ y=np.ones((2, 10)),
+ trace_names=["Trace [1]", "Trace [2]"],
+ )
+ plot_dict()
+
@pytest.fixture
def model(self):
# Define an example model
@@ -46,6 +56,11 @@ def dataset(self, model):
@pytest.mark.unit
def test_dataset_plots(self, dataset):
# Test plotting of Dataset objects
+ pybop.plot_trajectories(
+ dataset["Time [s]"],
+ dataset["Voltage [V]"],
+ trace_names=["Time [s]", "Voltage [V]"],
+ )
pybop.plot_dataset(dataset, signal=["Voltage [V]"])
@pytest.fixture
From 47c00589c18bac078e02a15d9b97ae1d67aa8272 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 12:47:21 +0000
Subject: [PATCH 50/64] Add test_with_ipykernel
---
tests/unit/test_plots.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index bc53f663..3704867e 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -106,3 +106,12 @@ def test_optim_plots(self, optim):
# Plot the cost landscape with optimisation path
pybop.plot_optim2d(optim, steps=5)
+
+ @pytest.mark.unit
+ def test_with_ipykernel(self, dataset, cost, optim):
+ import ipykernel
+ pybop.plot_dataset(dataset, signal=["Voltage [V]"])
+ pybop.plot_cost2d(cost, gradient=True, steps=5)
+ pybop.plot_convergence(optim)
+ pybop.plot_parameters(optim)
+ pybop.plot_optim2d(optim, steps=5)
From 1a80b6fa4cab9e28efe8b51bdc47e4a1e7ee5c62 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 12:47:56 +0000
Subject: [PATCH 51/64] style: pre-commit fixes
---
tests/unit/test_plots.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 3704867e..77f052ec 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -109,7 +109,6 @@ def test_optim_plots(self, optim):
@pytest.mark.unit
def test_with_ipykernel(self, dataset, cost, optim):
- import ipykernel
pybop.plot_dataset(dataset, signal=["Voltage [V]"])
pybop.plot_cost2d(cost, gradient=True, steps=5)
pybop.plot_convergence(optim)
From fa551a0403c9f29f67cd6c4d87dc03ac01420600 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 13:12:39 +0000
Subject: [PATCH 52/64] Re-add test_with_ipykernel
---
tests/unit/test_plots.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 77f052ec..8a415108 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -109,6 +109,9 @@ def test_optim_plots(self, optim):
@pytest.mark.unit
def test_with_ipykernel(self, dataset, cost, optim):
+ import ipykernel
+
+ print(ipykernel.__version__)
pybop.plot_dataset(dataset, signal=["Voltage [V]"])
pybop.plot_cost2d(cost, gradient=True, steps=5)
pybop.plot_convergence(optim)
From b6ce663c8616b7589edcfce731a888fb47cf8694 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 13:39:27 +0000
Subject: [PATCH 53/64] Update test_plots.py
---
tests/unit/test_plots.py | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 8a415108..12f74958 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -10,11 +10,12 @@ class TestPlots:
@pytest.mark.unit
def test_standard_plot(self):
- # Test standard plot and removal of brackets
+ # Test standard plot
+ trace_names = pybop.StandardPlot.remove_brackets(["Trace [1]", "Trace [2]"])
plot_dict = pybop.StandardPlot(
- x=np.ones(10),
+ x=np.ones((2, 10)),
y=np.ones((2, 10)),
- trace_names=["Trace [1]", "Trace [2]"],
+ trace_names=trace_names,
)
plot_dict()
@@ -64,18 +65,31 @@ def test_dataset_plots(self, dataset):
pybop.plot_dataset(dataset, signal=["Voltage [V]"])
@pytest.fixture
- def problem(self, model, parameters, dataset):
+ def fitting_problem(self, model, parameters, dataset):
return pybop.FittingProblem(model, parameters, dataset)
+ @pytest.fixture
+ def experiment(self):
+ return pybop.Experiment(
+ [
+ ("Discharge at 1C for 10 minutes (20 second period)"),
+ ]
+ )
+
+ @pytest.fixture
+ def design_problem(self, model, parameters, experiment):
+ return pybop.DesignProblem(model, parameters, experiment)
+
@pytest.mark.unit
- def test_problem_plots(self, problem):
+ def test_problem_plots(self, fitting_problem, design_problem):
# Test plotting of Problem objects
- pybop.quick_plot(problem, title="Optimised Comparison")
+ pybop.quick_plot(fitting_problem, title="Optimised Comparison")
+ pybop.quick_plot(design_problem)
@pytest.fixture
- def cost(self, problem):
+ def cost(self, fitting_problem):
# Define an example cost
- return pybop.SumSquaredError(problem)
+ return pybop.SumSquaredError(fitting_problem)
@pytest.mark.unit
def test_cost_plots(self, cost):
From f198cbe0773ef81d632e11a608525c4c54c28b30 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 13:46:55 +0000
Subject: [PATCH 54/64] Rename plot_cost2d as plot2d
---
pybop/__init__.py | 2 +-
pybop/plotting/plot_convergence.py | 2 +-
pybop/plotting/plot_cost2d.py | 6 ++----
tests/unit/test_plots.py | 8 ++++----
4 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/pybop/__init__.py b/pybop/__init__.py
index b85f2b84..05fd61dc 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -112,7 +112,7 @@
#
from .plotting.plotly_manager import PlotlyManager
from .plotting.quick_plot import StandardPlot, StandardSubplot, plot_trajectories
-from .plotting.plot_cost2d import plot_cost2d
+from .plotting.plot_cost2d import plot2d
from .plotting.plot_dataset import plot_dataset
from .plotting.plot_convergence import plot_convergence, plot_optim2d
from .plotting.plot_parameters import plot_parameters
diff --git a/pybop/plotting/plot_convergence.py b/pybop/plotting/plot_convergence.py
index 29ad582a..52e238f0 100644
--- a/pybop/plotting/plot_convergence.py
+++ b/pybop/plotting/plot_convergence.py
@@ -94,7 +94,7 @@ def plot_optim2d(optim, bounds=None, steps=10, show=True, **layout_kwargs):
cost = optim.cost
# Create the cost landscape
- fig = pybop.plot_cost2d(cost, bounds=bounds, steps=steps, show=False)
+ fig = pybop.plot2d(cost, bounds=bounds, steps=steps, show=False)
# Import plotly only when needed
go = pybop.PlotlyManager().go
diff --git a/pybop/plotting/plot_cost2d.py b/pybop/plotting/plot_cost2d.py
index 29c3bbd0..767ff975 100644
--- a/pybop/plotting/plot_cost2d.py
+++ b/pybop/plotting/plot_cost2d.py
@@ -3,9 +3,7 @@
import numpy as np
-def plot_cost2d(
- cost, gradient=False, bounds=None, steps=10, show=True, **layout_kwargs
-):
+def plot2d(cost, gradient=False, bounds=None, steps=10, show=True, **layout_kwargs):
"""
Plot a 2D visualisation of a cost landscape using Plotly.
@@ -147,6 +145,6 @@ def get_param_bounds(cost):
if param.bounds is not None:
bounds[i] = param.bounds
else:
- raise ValueError("plot_cost2d could not find bounds required for plotting")
+ raise ValueError("plot2d could not find bounds required for plotting")
return bounds
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 12f74958..dc0d54d1 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -94,14 +94,14 @@ def cost(self, fitting_problem):
@pytest.mark.unit
def test_cost_plots(self, cost):
# Test plotting of Cost objects
- pybop.plot_cost2d(cost, gradient=True, steps=5)
+ pybop.plot2d(cost, gradient=True, steps=5)
# Test without bounds
for param in cost.problem.parameters:
param.bounds = None
with pytest.raises(ValueError):
- pybop.plot_cost2d(cost, steps=5)
- pybop.plot_cost2d(cost, bounds=np.array([[0.5, 0.8], [0.4, 0.7]]), steps=5)
+ pybop.plot2d(cost, steps=5)
+ pybop.plot2d(cost, bounds=np.array([[0.5, 0.8], [0.4, 0.7]]), steps=5)
@pytest.fixture
def optim(self, cost):
@@ -127,7 +127,7 @@ def test_with_ipykernel(self, dataset, cost, optim):
print(ipykernel.__version__)
pybop.plot_dataset(dataset, signal=["Voltage [V]"])
- pybop.plot_cost2d(cost, gradient=True, steps=5)
+ pybop.plot2d(cost, gradient=True, steps=5)
pybop.plot_convergence(optim)
pybop.plot_parameters(optim)
pybop.plot_optim2d(optim, steps=5)
From 9ef38974a99913b02b17654f2c6ae4dfeceac410 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:15:39 +0000
Subject: [PATCH 55/64] Merge plot_optim2d with plot2d
---
pybop/__init__.py | 2 +-
pybop/plotting/plot_convergence.py | 86 ------------------------------
pybop/plotting/plot_cost2d.py | 59 ++++++++++++++++++--
tests/unit/test_plots.py | 4 +-
4 files changed, 59 insertions(+), 92 deletions(-)
diff --git a/pybop/__init__.py b/pybop/__init__.py
index 05fd61dc..dd49e2e5 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -114,7 +114,7 @@
from .plotting.quick_plot import StandardPlot, StandardSubplot, plot_trajectories
from .plotting.plot_cost2d import plot2d
from .plotting.plot_dataset import plot_dataset
-from .plotting.plot_convergence import plot_convergence, plot_optim2d
+from .plotting.plot_convergence import plot_convergence
from .plotting.plot_parameters import plot_parameters
from .plotting.plot_problem import quick_plot
diff --git a/pybop/plotting/plot_convergence.py b/pybop/plotting/plot_convergence.py
index 52e238f0..e1814b4a 100644
--- a/pybop/plotting/plot_convergence.py
+++ b/pybop/plotting/plot_convergence.py
@@ -56,89 +56,3 @@ def plot_convergence(optim, show=True, **layout_kwargs):
fig.show()
return fig
-
-
-def plot_optim2d(optim, bounds=None, steps=10, show=True, **layout_kwargs):
- """
- Plot a 2D visualization of a cost landscape using Plotly with the optimisation trace.
-
- This function generates a contour plot representing the cost landscape for a provided
- callable cost function over a grid of parameter values within the specified bounds.
-
- Parameters
- ----------
- optim : object
- Optimisation object which provides a specific optimisation trace overlaid on the cost landscape.
- bounds : numpy.ndarray, optional
- A 2x2 array specifying the [min, max] bounds for each parameter. If None, uses `get_param_bounds`.
- steps : int, optional
- The number of intervals to divide the parameter space into along each dimension (default is 10).
- show : bool, optional
- If True, the figure is shown upon creation (default: True).
- **layout_kwargs : optional
- Valid Plotly layout keys and their values,
- e.g. `xaxis_title="Time [s]"` or
- `xaxis={"title": "Time [s]", "titlefont_size": 18}`.
-
- Returns
- -------
- plotly.graph_objs.Figure
- The Plotly figure object containing the cost landscape plot.
-
- Raises
- ------
- ValueError
- If the cost function does not return a valid cost when called with a parameter list.
- """
- # Extract the cost function from the optimisation object
- cost = optim.cost
-
- # Create the cost landscape
- fig = pybop.plot2d(cost, bounds=bounds, steps=steps, show=False)
-
- # Import plotly only when needed
- go = pybop.PlotlyManager().go
-
- # Plot the optimisation trace
- optim_trace = np.array([item for sublist in optim.log for item in sublist])
- optim_trace = optim_trace.reshape(-1, 2)
- fig.add_trace(
- go.Scatter(
- x=optim_trace[:, 0],
- y=optim_trace[:, 1],
- mode="markers",
- marker=dict(
- color=[i / len(optim_trace) for i in range(len(optim_trace))],
- colorscale="YlOrBr",
- showscale=False,
- ),
- showlegend=False,
- )
- )
-
- # Plot the initial guess
- fig.add_trace(
- go.Scatter(
- x=[optim.x0[0]],
- y=[optim.x0[1]],
- mode="markers",
- marker_symbol="circle",
- marker=dict(
- color="mediumspringgreen",
- line_color="mediumspringgreen",
- line_width=1,
- size=14,
- showscale=False,
- ),
- showlegend=False,
- )
- )
-
- # Update the layout and display the figure
- fig.update_layout(**layout_kwargs)
- if "ipykernel" in sys.modules and show:
- fig.show("svg")
- elif show:
- fig.show()
-
- return fig
diff --git a/pybop/plotting/plot_cost2d.py b/pybop/plotting/plot_cost2d.py
index 767ff975..d03fcb59 100644
--- a/pybop/plotting/plot_cost2d.py
+++ b/pybop/plotting/plot_cost2d.py
@@ -3,7 +3,9 @@
import numpy as np
-def plot2d(cost, gradient=False, bounds=None, steps=10, show=True, **layout_kwargs):
+def plot2d(
+ cost_or_optim, gradient=False, bounds=None, steps=10, show=True, **layout_kwargs
+):
"""
Plot a 2D visualisation of a cost landscape using Plotly.
@@ -12,8 +14,12 @@ def plot2d(cost, gradient=False, bounds=None, steps=10, show=True, **layout_kwar
Parameters
----------
- cost : callable
- The cost function to be evaluated. Must accept a list of parameter values and return a cost value.
+ cost_or_optim : a callable cost function, pybop Cost or Optimisation object
+ Either:
+ - the cost function to be evaluated. Must accept a list of parameter values and return a cost value.
+ - an Optimisation object which provides a specific optimisation trace overlaid on the cost landscape.
+ gradient : bool, optional
+ If True, the gradient is shown (default: False).
bounds : numpy.ndarray, optional
A 2x2 array specifying the [min, max] bounds for each parameter. If None, uses `get_param_bounds`.
steps : int, optional
@@ -36,6 +42,15 @@ def plot2d(cost, gradient=False, bounds=None, steps=10, show=True, **layout_kwar
If the cost function does not return a valid cost when called with a parameter list.
"""
+ # Assign input as a cost or optimisation object
+ if isinstance(cost_or_optim, pybop.Optimisation):
+ optim = cost_or_optim
+ plot_optim = True
+ cost = optim.cost
+ else:
+ cost = cost_or_optim
+ plot_optim = False
+
# Set up parameter bounds
if bounds is None:
bounds = get_param_bounds(cost)
@@ -91,6 +106,44 @@ def plot2d(cost, gradient=False, bounds=None, steps=10, show=True, **layout_kwar
# Create contour plot and update the layout
fig = go.Figure(data=[go.Contour(x=x, y=y, z=costs)], layout=layout)
+
+ if plot_optim:
+ # Plot the optimisation trace
+ optim_trace = np.array([item for sublist in optim.log for item in sublist])
+ optim_trace = optim_trace.reshape(-1, 2)
+ fig.add_trace(
+ go.Scatter(
+ x=optim_trace[:, 0],
+ y=optim_trace[:, 1],
+ mode="markers",
+ marker=dict(
+ color=[i / len(optim_trace) for i in range(len(optim_trace))],
+ colorscale="YlOrBr",
+ showscale=False,
+ ),
+ showlegend=False,
+ )
+ )
+
+ # Plot the initial guess
+ fig.add_trace(
+ go.Scatter(
+ x=[optim.x0[0]],
+ y=[optim.x0[1]],
+ mode="markers",
+ marker_symbol="circle",
+ marker=dict(
+ color="mediumspringgreen",
+ line_color="mediumspringgreen",
+ line_width=1,
+ size=14,
+ showscale=False,
+ ),
+ showlegend=False,
+ )
+ )
+
+ # Update the layout and display the figure
fig.update_layout(**layout_kwargs)
if "ipykernel" in sys.modules and show:
fig.show("svg")
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index dc0d54d1..1384272e 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -119,7 +119,7 @@ def test_optim_plots(self, optim):
pybop.plot_parameters(optim)
# Plot the cost landscape with optimisation path
- pybop.plot_optim2d(optim, steps=5)
+ pybop.plot2d(optim, steps=5)
@pytest.mark.unit
def test_with_ipykernel(self, dataset, cost, optim):
@@ -130,4 +130,4 @@ def test_with_ipykernel(self, dataset, cost, optim):
pybop.plot2d(cost, gradient=True, steps=5)
pybop.plot_convergence(optim)
pybop.plot_parameters(optim)
- pybop.plot_optim2d(optim, steps=5)
+ pybop.plot2d(optim, steps=5)
From f42298f60199d4b2a566a7ad9f55623ff4ca2c3a Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:16:58 +0000
Subject: [PATCH 56/64] Update scripts to plot2d
---
examples/scripts/ecm_CMAES.py | 4 ++--
examples/scripts/exp_UKF.py | 2 +-
examples/scripts/spm_CMAES.py | 4 ++--
examples/scripts/spm_IRPropMin.py | 2 +-
examples/scripts/spm_MLE.py | 2 +-
examples/scripts/spm_SNES.py | 2 +-
examples/scripts/spm_UKF.py | 2 +-
examples/scripts/spm_XNES.py | 2 +-
examples/scripts/spm_adam.py | 2 +-
examples/scripts/spm_descent.py | 2 +-
examples/scripts/spm_pso.py | 2 +-
examples/scripts/spm_scipymin.py | 2 +-
examples/scripts/spme_max_energy.py | 2 +-
13 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/examples/scripts/ecm_CMAES.py b/examples/scripts/ecm_CMAES.py
index da5835ad..0e0f5476 100644
--- a/examples/scripts/ecm_CMAES.py
+++ b/examples/scripts/ecm_CMAES.py
@@ -97,8 +97,8 @@
pybop.plot_parameters(optim)
# Plot the cost landscape
-pybop.plot_cost2d(cost, steps=15)
+pybop.plot2d(cost, steps=15)
# Plot the cost landscape with optimisation path and updated bounds
bounds = np.array([[1e-4, 1e-2], [1e-5, 1e-2]])
-pybop.plot_optim2d(optim, bounds=bounds, steps=15)
+pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/exp_UKF.py b/examples/scripts/exp_UKF.py
index d7d838cc..340524fd 100644
--- a/examples/scripts/exp_UKF.py
+++ b/examples/scripts/exp_UKF.py
@@ -112,4 +112,4 @@
pybop.plot_parameters(optim)
# Plot the cost landscape with optimisation path
-pybop.plot_optim2d(optim, steps=15)
+pybop.plot2d(optim, steps=15)
diff --git a/examples/scripts/spm_CMAES.py b/examples/scripts/spm_CMAES.py
index 816f6798..89131604 100644
--- a/examples/scripts/spm_CMAES.py
+++ b/examples/scripts/spm_CMAES.py
@@ -68,7 +68,7 @@
pybop.plot_parameters(optim)
# Plot the cost landscape
-pybop.plot_cost2d(cost, steps=15)
+pybop.plot2d(cost, steps=15)
# Plot the cost landscape with optimisation path
-pybop.plot_optim2d(optim, steps=15)
+pybop.plot2d(optim, steps=15)
diff --git a/examples/scripts/spm_IRPropMin.py b/examples/scripts/spm_IRPropMin.py
index 277b9f08..973e2f8a 100644
--- a/examples/scripts/spm_IRPropMin.py
+++ b/examples/scripts/spm_IRPropMin.py
@@ -52,4 +52,4 @@
# Plot the cost landscape with optimisation path
bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
-pybop.plot_optim2d(optim, bounds=bounds, steps=15)
+pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py
index 6e822c4a..0fb86088 100644
--- a/examples/scripts/spm_MLE.py
+++ b/examples/scripts/spm_MLE.py
@@ -67,4 +67,4 @@
# Plot the cost landscape with optimisation path
bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
-pybop.plot_optim2d(optim, bounds=bounds, steps=15)
+pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_SNES.py b/examples/scripts/spm_SNES.py
index e3663976..421d386a 100644
--- a/examples/scripts/spm_SNES.py
+++ b/examples/scripts/spm_SNES.py
@@ -51,4 +51,4 @@
pybop.plot_parameters(optim)
# Plot the cost landscape with optimisation path
-pybop.plot_optim2d(optim, steps=15)
+pybop.plot2d(optim, steps=15)
diff --git a/examples/scripts/spm_UKF.py b/examples/scripts/spm_UKF.py
index 067a554d..e629d3f3 100644
--- a/examples/scripts/spm_UKF.py
+++ b/examples/scripts/spm_UKF.py
@@ -76,4 +76,4 @@
# pybop.plot_parameters(optim)
# # Plot the cost landscape with optimisation path
-# pybop.plot_optim2d(optim, steps=15)
+# pybop.plot2d(optim, steps=15)
diff --git a/examples/scripts/spm_XNES.py b/examples/scripts/spm_XNES.py
index 3321eb9b..ef9a9e03 100644
--- a/examples/scripts/spm_XNES.py
+++ b/examples/scripts/spm_XNES.py
@@ -52,4 +52,4 @@
pybop.plot_parameters(optim)
# Plot the cost landscape with optimisation path
-pybop.plot_optim2d(optim, steps=15)
+pybop.plot2d(optim, steps=15)
diff --git a/examples/scripts/spm_adam.py b/examples/scripts/spm_adam.py
index 6be9849c..687ff3b3 100644
--- a/examples/scripts/spm_adam.py
+++ b/examples/scripts/spm_adam.py
@@ -78,4 +78,4 @@ def noise(sigma):
# Plot the cost landscape with optimisation path
bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
-pybop.plot_optim2d(optim, bounds=bounds, steps=15)
+pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_descent.py b/examples/scripts/spm_descent.py
index 624c95aa..a474234c 100644
--- a/examples/scripts/spm_descent.py
+++ b/examples/scripts/spm_descent.py
@@ -55,4 +55,4 @@
# Plot the cost landscape with optimisation path
bounds = np.array([[0.5, 0.8], [0.4, 0.7]])
-pybop.plot_optim2d(optim, bounds=bounds, steps=15)
+pybop.plot2d(optim, bounds=bounds, steps=15)
diff --git a/examples/scripts/spm_pso.py b/examples/scripts/spm_pso.py
index 819a7454..74d0ea4c 100644
--- a/examples/scripts/spm_pso.py
+++ b/examples/scripts/spm_pso.py
@@ -52,4 +52,4 @@
pybop.plot_parameters(optim)
# Plot the cost landscape with optimisation path
-pybop.plot_optim2d(optim, steps=15)
+pybop.plot2d(optim, steps=15)
diff --git a/examples/scripts/spm_scipymin.py b/examples/scripts/spm_scipymin.py
index 759f8c2e..1573701b 100644
--- a/examples/scripts/spm_scipymin.py
+++ b/examples/scripts/spm_scipymin.py
@@ -53,4 +53,4 @@
pybop.plot_parameters(optim)
# Plot the cost landscape with optimisation path
-pybop.plot_optim2d(optim, steps=15)
+pybop.plot2d(optim, steps=15)
diff --git a/examples/scripts/spme_max_energy.py b/examples/scripts/spme_max_energy.py
index c4249590..4d4e3c73 100644
--- a/examples/scripts/spme_max_energy.py
+++ b/examples/scripts/spme_max_energy.py
@@ -66,4 +66,4 @@
# Plot the cost landscape with optimisation path
if len(x) == 2:
- pybop.plot_optim2d(optim, steps=3)
+ pybop.plot2d(optim, steps=3)
From 3ca404d9b19600859a79401b30c3d2d5e66150a9 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:18:18 +0000
Subject: [PATCH 57/64] Update notebooks to plot2d
---
examples/notebooks/spm_Adam.ipynb | 4 ++--
examples/notebooks/spm_CMAES.ipynb | 4 ++--
examples/notebooks/spm_electrode_design.ipynb | 2 +-
examples/notebooks/spm_scipy_DifferentialEvolution.ipynb | 4 ++--
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/examples/notebooks/spm_Adam.ipynb b/examples/notebooks/spm_Adam.ipynb
index d3860b53..9d0c755c 100644
--- a/examples/notebooks/spm_Adam.ipynb
+++ b/examples/notebooks/spm_Adam.ipynb
@@ -443,10 +443,10 @@
],
"source": [
"# Plot the cost landscape\n",
- "pybop.plot_cost2d(cost, steps=15)\n",
+ "pybop.plot2d(cost, steps=15)\n",
"# Plot the cost landscape with optimisation path and updated bounds\n",
"bounds = np.array([[0.6, 0.9], [0.5, 0.8]])\n",
- "pybop.plot_optim2d(optim, bounds=bounds, steps=15);"
+ "pybop.plot2d(optim, bounds=bounds, steps=15);"
]
},
{
diff --git a/examples/notebooks/spm_CMAES.ipynb b/examples/notebooks/spm_CMAES.ipynb
index 73b783ce..03093236 100644
--- a/examples/notebooks/spm_CMAES.ipynb
+++ b/examples/notebooks/spm_CMAES.ipynb
@@ -433,10 +433,10 @@
],
"source": [
"# Plot the cost landscape\n",
- "pybop.plot_cost2d(cost, steps=15)\n",
+ "pybop.plot2d(cost, steps=15)\n",
"# Plot the cost landscape with optimisation path and updated bounds\n",
"bounds = np.array([[0.6, 0.9], [0.5, 0.8]])\n",
- "pybop.plot_optim2d(optim, bounds=bounds, steps=15);"
+ "pybop.plot2d(optim, bounds=bounds, steps=15);"
]
},
{
diff --git a/examples/notebooks/spm_electrode_design.ipynb b/examples/notebooks/spm_electrode_design.ipynb
index 8b44b8f5..538c9f97 100644
--- a/examples/notebooks/spm_electrode_design.ipynb
+++ b/examples/notebooks/spm_electrode_design.ipynb
@@ -310,7 +310,7 @@
],
"source": [
"if len(x) == 2:\n",
- " pybop.plot_optim2d(optim, steps=3)"
+ " pybop.plot2d(optim, steps=3)"
]
}
],
diff --git a/examples/notebooks/spm_scipy_DifferentialEvolution.ipynb b/examples/notebooks/spm_scipy_DifferentialEvolution.ipynb
index 21e3c1ac..2d9f77cd 100644
--- a/examples/notebooks/spm_scipy_DifferentialEvolution.ipynb
+++ b/examples/notebooks/spm_scipy_DifferentialEvolution.ipynb
@@ -441,10 +441,10 @@
],
"source": [
"# Plot the cost landscape\n",
- "pybop.plot_cost2d(cost, steps=15)\n",
+ "pybop.plot2d(cost, steps=15)\n",
"# Plot the cost landscape with optimisation path and updated bounds\n",
"bounds = np.array([[0.6, 0.9], [0.5, 0.8]])\n",
- "pybop.plot_optim2d(optim, bounds=bounds, steps=15);"
+ "pybop.plot2d(optim, bounds=bounds, steps=15);"
]
},
{
From 302add72f1fc237fbadc961c2d1f911d807b5e34 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:39:58 +0000
Subject: [PATCH 58/64] Update spm_MLE.py
---
examples/scripts/spm_MLE.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/scripts/spm_MLE.py b/examples/scripts/spm_MLE.py
index 0fb86088..3b319ff1 100644
--- a/examples/scripts/spm_MLE.py
+++ b/examples/scripts/spm_MLE.py
@@ -63,7 +63,7 @@
pybop.plot_parameters(optim)
# Plot the cost landscape
-pybop.plot_cost2d(likelihood, steps=15)
+pybop.plot2d(likelihood, steps=15)
# Plot the cost landscape with optimisation path
bounds = np.array([[0.55, 0.77], [0.48, 0.68]])
From fe090f1e1f757f253b198ecda3347b53342e2792 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:41:47 +0000
Subject: [PATCH 59/64] Update filename to plot2d
---
pybop/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pybop/__init__.py b/pybop/__init__.py
index dd49e2e5..82b7ea6e 100644
--- a/pybop/__init__.py
+++ b/pybop/__init__.py
@@ -112,7 +112,7 @@
#
from .plotting.plotly_manager import PlotlyManager
from .plotting.quick_plot import StandardPlot, StandardSubplot, plot_trajectories
-from .plotting.plot_cost2d import plot2d
+from .plotting.plot2d import plot2d
from .plotting.plot_dataset import plot_dataset
from .plotting.plot_convergence import plot_convergence
from .plotting.plot_parameters import plot_parameters
From 7b8c7f0688affa0fbedf6dea4765cb92e0437485 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:42:42 +0000
Subject: [PATCH 60/64] Rename plot_cost2d.py to plot2d.py
---
pybop/plotting/{plot_cost2d.py => plot2d.py} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename pybop/plotting/{plot_cost2d.py => plot2d.py} (100%)
diff --git a/pybop/plotting/plot_cost2d.py b/pybop/plotting/plot2d.py
similarity index 100%
rename from pybop/plotting/plot_cost2d.py
rename to pybop/plotting/plot2d.py
From 8bfaeba8b5d7138d2bb5fc80b6bef56265609ccb Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:55:35 +0000
Subject: [PATCH 61/64] Fix typo in error message
---
pybop/observers/observer.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pybop/observers/observer.py b/pybop/observers/observer.py
index ce482ec8..0bb8b08e 100644
--- a/pybop/observers/observer.py
+++ b/pybop/observers/observer.py
@@ -111,7 +111,7 @@ def log_likelihood(self, values: dict, times: np.ndarray, inputs: Inputs) -> flo
return log_likelihood
else:
raise ValueError(
- "Obersever.log_likelihood is currently restricted to single output models."
+ "Observer.log_likelihood is currently restricted to single output models."
)
def get_current_state(self) -> TimeSeriesState:
From cb705052b3bd93f72578777d2bfb2b55304319e5 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:57:33 +0000
Subject: [PATCH 62/64] Reduce init_soc in test_model_misparameterisation
---
tests/integration/test_parameterisations.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/integration/test_parameterisations.py b/tests/integration/test_parameterisations.py
index 73c87de4..d4fb2b5a 100644
--- a/tests/integration/test_parameterisations.py
+++ b/tests/integration/test_parameterisations.py
@@ -215,7 +215,7 @@ def test_multiple_signals(self, multi_optimiser, spm_two_signal_cost):
assert initial_cost > final_cost
np.testing.assert_allclose(x, self.ground_truth, atol=2.5e-2)
- @pytest.mark.parametrize("init_soc", [0.4, 0.7])
+ @pytest.mark.parametrize("init_soc", [0.4, 0.6])
@pytest.mark.integration
def test_model_misparameterisation(self, parameters, model, init_soc):
# Define two different models with different parameter sets
From 76b5c19598a13ae9b24aab61fb382530535ded49 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Thu, 21 Mar 2024 09:59:05 +0000
Subject: [PATCH 63/64] Update ipykernel print to assert
Co-authored-by: Brady Planden <55357039+BradyPlanden@users.noreply.github.com>
---
tests/unit/test_plots.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 1384272e..4b070d16 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -125,7 +125,7 @@ def test_optim_plots(self, optim):
def test_with_ipykernel(self, dataset, cost, optim):
import ipykernel
- print(ipykernel.__version__)
+ assert ipykernel.__version__ >= 0.6
pybop.plot_dataset(dataset, signal=["Voltage [V]"])
pybop.plot2d(cost, gradient=True, steps=5)
pybop.plot_convergence(optim)
From e513c33232d2b3d4adcfe52df42abd6419eac301 Mon Sep 17 00:00:00 2001
From: NicolaCourtier <45851982+NicolaCourtier@users.noreply.github.com>
Date: Thu, 21 Mar 2024 10:21:09 +0000
Subject: [PATCH 64/64] Fix version check
---
tests/unit/test_plots.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/unit/test_plots.py b/tests/unit/test_plots.py
index 4b070d16..b60ca2a8 100644
--- a/tests/unit/test_plots.py
+++ b/tests/unit/test_plots.py
@@ -1,6 +1,7 @@
import pybop
import numpy as np
import pytest
+from packaging import version
class TestPlots:
@@ -125,7 +126,7 @@ def test_optim_plots(self, optim):
def test_with_ipykernel(self, dataset, cost, optim):
import ipykernel
- assert ipykernel.__version__ >= 0.6
+ assert version.parse(ipykernel.__version__) >= version.parse("0.6")
pybop.plot_dataset(dataset, signal=["Voltage [V]"])
pybop.plot2d(cost, gradient=True, steps=5)
pybop.plot_convergence(optim)