Feature/meanfunc #74

Merged: 21 commits from Feature/meanfunc into devel on Mar 16, 2020
Commits (21)
8b0d389
initial working implementation of mean function with tests
edaub Jan 15, 2020
55c87f1
implemented polynomial mean function
edaub Jan 17, 2020
32a9fe5
added finite difference tests to mean function unit tests
edaub Jan 28, 2020
2a9e382
made finite difference spacing a fixture
edaub Jan 28, 2020
e38251d
changed fixed mean to use a function rather than lambda for derivative
edaub Jan 28, 2020
5444671
implemented mean function (note only unit tests assume zero mean, wil…
edaub Jan 31, 2020
c622b43
fixed bugs in mean function implementation
edaub Jan 31, 2020
27ef73f
implemented additional mean function capabilities and wrote tests
edaub Feb 12, 2020
539d819
added docstrings to base mean function class
edaub Feb 13, 2020
69c72e2
documented mean sum class
edaub Feb 13, 2020
ce8b2aa
documented mean product class
edaub Feb 13, 2020
813471c
documented mean composite class
edaub Feb 13, 2020
b82feae
documented fixed mean class
edaub Feb 13, 2020
b1962c8
added documentation and a few other checks to the fixed mean function…
edaub Feb 13, 2020
4a884aa
added documentation for coefficient class
edaub Feb 13, 2020
d89bdbc
added documentation for polynomial mean class
edaub Feb 13, 2020
0fabfce
added module docstring and test for indexing error in composite mean
edaub Feb 13, 2020
7e3648b
added documentation page for mean function
edaub Feb 13, 2020
e5448dd
implemented power mean function
edaub Feb 18, 2020
7e66fd7
fixed bugs in mean power and wrote unit tests
edaub Feb 18, 2020
1c41b79
updated pre-release version for merge to devel
edaub Mar 16, 2020
36 changes: 36 additions & 0 deletions docs/MeanFunction.rst
@@ -0,0 +1,36 @@
.. _MeanFunction:

**********************************
The ``MeanFunction`` Module
**********************************

.. automodule:: mogp_emulator.MeanFunction
:noindex:

.. autoclass:: mogp_emulator.MeanFunction.MeanFunction
:members:

.. autoclass:: mogp_emulator.MeanFunction.MeanSum
:members:

.. autoclass:: mogp_emulator.MeanFunction.MeanProduct
:members:

.. autoclass:: mogp_emulator.MeanFunction.MeanComposite
:members:

.. autoclass:: mogp_emulator.MeanFunction.FixedMean
:members:

.. autoclass:: mogp_emulator.MeanFunction.ConstantMean
:members:

.. autoclass:: mogp_emulator.MeanFunction.LinearMean
:members:

.. autoclass:: mogp_emulator.MeanFunction.Coefficient
:members:

.. autoclass:: mogp_emulator.MeanFunction.PolynomialMean
:members:
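
The autoclass entries above give the public class names; here is a minimal usage sketch, assuming constructors that take a fixed value (`ConstantMean`), an input index (`LinearMean`), and `+`/`*` overloads that build `MeanSum`/`MeanProduct` (none of these signatures is spelled out in this diff):

```python
# Hedged sketch: class names come from the docs above, but the
# constructor arguments and operator overloads are assumptions.
from mogp_emulator.MeanFunction import ConstantMean, LinearMean, Coefficient

mf = ConstantMean(1.) + LinearMean(0)  # presumably a MeanSum: 1 + x[0]
mf2 = Coefficient()*LinearMean(0)      # presumably a MeanProduct with a fittable coefficient
```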

1 change: 1 addition & 0 deletions docs/index.rst
@@ -13,6 +13,7 @@ Welcome to Multi-Output GP Emulator's documentation!
GaussianProcess
DimensionReduction
MultiOutputGP
+ MeanFunction
Kernel
ExperimentalDesign
SequentialDesign
49 changes: 31 additions & 18 deletions mogp_emulator/GaussianProcess.py
@@ -133,7 +133,7 @@ def __init__(self, *args):

self.nugget = nugget

self.kernel = SquaredExponential()

if not (emulator_file is None or theta is None):
self._set_params(theta)
@@ -238,6 +238,20 @@ def get_D(self):

return self.D

def get_n_params(self):
"""
Returns number of hyperparameters

Returns the number of hyperparameters for the emulator. The number depends on the
choice of mean function, covariance function, and nugget strategy, and possibly the
number of inputs for certain choices of the mean function.

:returns: Number of hyperparameters
:rtype: int
"""

return self.D + 1
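
At this stage of the branch the count is still `self.D + 1`: one correlation length per input plus an overall covariance scale for the default squared exponential kernel (the docstring anticipates that mean-function choices will change this later). A quick sketch of the intended call, with made-up data:

```python
import numpy as np
import mogp_emulator

inputs = np.random.rand(20, 3)    # 20 made-up design points, 3 inputs
targets = np.sum(inputs, axis=1)  # made-up target values
gp = mogp_emulator.GaussianProcess(inputs, targets)
print(gp.get_n_params())          # expect 4 == D + 1 at this stage
```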

def get_params(self):
"""
Returns emulator parameters
@@ -395,7 +409,7 @@ def _set_params(self, theta):
"""

theta = np.array(theta)
- assert theta.shape == (self.D + 1,), "Parameter vector must have length number of inputs + 1"
+ assert theta.shape == (self.get_n_params(),), "Parameter vector must have length equal to the number of parameters"

self.theta = theta
self._prepare_likelihood()
@@ -448,12 +462,12 @@ def partial_devs(self, theta):
:rtype: ndarray
"""

- assert theta.shape == (self.D + 1,), "Parameter vector must have length number of inputs + 1"
+ assert theta.shape == (self.get_n_params(),), "Parameter vector must have length equal to the number of parameters"

if not np.allclose(np.array(theta), self.theta):
self._set_params(theta)

- partials = np.zeros(self.D + 1)
+ partials = np.zeros(self.get_n_params())

dKdtheta = self.kernel.kernel_deriv(self.inputs, self.inputs, self.theta)

@@ -490,26 +504,26 @@ def hessian(self, theta):
:rtype: ndarray
"""

- assert theta.shape == (self.D + 1,), "Parameter vector must have length number of inputs + 1"
+ assert theta.shape == (self.get_n_params(),), "Parameter vector must have length equal to the number of parameters"

if not np.allclose(np.array(theta), self.theta):
self._set_params(theta)

- hessian = np.zeros((self.D + 1, self.D + 1))

dKdtheta = self.kernel.kernel_deriv(self.inputs, self.inputs, self.theta)
d2Kdtheta2 = self.kernel.kernel_hessian(self.inputs, self.inputs, self.theta)

+ hessian = np.zeros((self.get_n_params(), self.get_n_params()))

for d1 in range(self.D + 1):
invQ_dot_d1 = linalg.cho_solve((self.L, True), dKdtheta[d1])
for d2 in range(self.D + 1):
invQ_dot_d2 = linalg.cho_solve((self.L, True), dKdtheta[d2])
invQ_dot_d1d2 = linalg.cho_solve((self.L, True), d2Kdtheta2[d1, d2])
- hessian[d1, d2] = 0.5*(np.linalg.multi_dot([self.invQt,
-                        2.*np.dot(dKdtheta[d1], invQ_dot_d2) - d2Kdtheta2[d1, d2],
-                        self.invQt]) -
-                        np.trace(np.dot(invQ_dot_d1, invQ_dot_d2) - invQ_dot_d1d2))
+ term_1 = np.linalg.multi_dot([self.invQt,
+                               2.*np.dot(dKdtheta[d1], invQ_dot_d2) - d2Kdtheta2[d1, d2],
+                               self.invQt])
+ term_2 = np.trace(np.dot(invQ_dot_d1, invQ_dot_d2) - invQ_dot_d1d2)
+ hessian[d1, d2] = 0.5*(term_1 - term_2)

return hessian
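
For reference, the `term_1`/`term_2` split matches the standard expression for the Hessian of the negative marginal log-likelihood (read off from the code above, not from the PR discussion), with `invQt` playing the role of alpha:

```latex
\frac{\partial^2(-\log L)}{\partial\theta_i\,\partial\theta_j}
  = \frac{1}{2}\Big[\alpha^\top\big(2K_iK^{-1}K_j - K_{ij}\big)\alpha
  - \operatorname{tr}\big(K^{-1}K_iK^{-1}K_j - K^{-1}K_{ij}\big)\Big],
\qquad \alpha = K^{-1}y,\quad K_i = \partial K/\partial\theta_i
```

`term_1` is the quadratic form in the bracket and `term_2` is the trace.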

@@ -606,10 +620,10 @@ def learn_hyperparameters(self, n_tries = 15, theta0 = None, method = 'L-BFGS-B'
loglikelihood_values = []
theta_values = []

- theta_startvals = 5.*(np.random.rand(n_tries, self.D + 1) - 0.5)
+ theta_startvals = 5.*(np.random.rand(n_tries, self.get_n_params()) - 0.5)
if not theta0 is None:
theta0 = np.array(theta0)
- assert theta0.shape == (self.D + 1,), "theta0 must be a 1D array with length D + 1"
+ assert theta0.shape == (self.get_n_params(),), "theta0 must be a 1D array with length equal to the number of parameters"
theta_startvals[0,:] = theta0

for theta in theta_startvals:
@@ -659,7 +673,7 @@ def compute_local_covariance(self):

try:
L = np.linalg.cholesky(hess)
- cov = linalg.cho_solve((L, True), np.eye(self.D + 1))
+ cov = linalg.cho_solve((L, True), np.eye(self.get_n_params()))
except linalg.LinAlgError:
raise linalg.LinAlgError("Hessian matrix is not symmetric positive definite, optimization may not have converged")
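
Solving against the identity with the Cholesky factor is the standard inverse for a symmetric positive definite matrix, so `cov` here is the inverse Hessian, i.e. a Laplace-style local covariance for the hyperparameters (an interpretation inferred from the method name). A small self-contained check of the idiom:

```python
import numpy as np
from scipy import linalg

H = np.array([[2.0, 0.5],
              [0.5, 1.0]])                 # made-up SPD "Hessian"
L = np.linalg.cholesky(H)                  # lower-triangular factor
cov = linalg.cho_solve((L, True), np.eye(2))
assert np.allclose(cov, np.linalg.inv(H))  # cho_solve against I inverts H
```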

@@ -884,8 +898,7 @@ def _predict_single(self, testing, do_deriv = True, do_unc = True):
if do_deriv:
deriv = np.zeros((n_testing, self.D))
kern_deriv = self.kernel.kernel_inputderiv(testing, self.inputs, self.theta)
- for d in range(self.D):
-     deriv[:, d] = np.dot(kern_deriv[d], self.invQt)
+ deriv = np.transpose(np.dot(kern_deriv, self.invQt))

return mu, var, deriv
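
The one-line replacement works because `np.dot` of a 3-D array with a 1-D vector contracts the last axis: `kern_deriv` has shape `(D, n_testing, n)`, the product has shape `(D, n_testing)`, and the transpose restores `(n_testing, D)`. A self-contained equivalence check, with shapes assumed from the surrounding code:

```python
import numpy as np

D, n_test, n = 2, 5, 7
kern_deriv = np.random.rand(D, n_test, n)  # stand-in for kernel_inputderiv output
invQt = np.random.rand(n)                  # stand-in for K^{-1} y

loop = np.zeros((n_test, D))
for d in range(D):                         # old per-dimension loop
    loop[:, d] = np.dot(kern_deriv[d], invQt)

vec = np.transpose(np.dot(kern_deriv, invQt))  # new vectorized form
assert np.allclose(loop, vec)
```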

@@ -971,7 +984,7 @@ def _predict_samples(self, testing, do_deriv = True, do_unc = True):

mu_mean = np.mean(mu, axis = 0)
if do_unc:
- var_mean = np.mean(var, axis = 0)+np.var(mu, axis = 0)
+ var_mean = np.mean(var, axis = 0) + np.var(mu, axis = 0)
else:
var_mean = None
if do_deriv:
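The `var_mean` line in the last hunk is the law of total variance taken over hyperparameter samples: the predictive variance is the mean of the per-sample variances plus the variance of the per-sample means,

```latex
\operatorname{Var}[y_*] = \mathbb{E}_\theta\big[\operatorname{Var}[y_*\mid\theta]\big]
                        + \operatorname{Var}_\theta\big[\mathbb{E}[y_*\mid\theta]\big]
```

which is exactly `np.mean(var, axis = 0) + np.var(mu, axis = 0)`.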