diff --git a/mogp_emulator/GaussianProcess.py b/mogp_emulator/GaussianProcess.py
index 43bee5bf..1769a246 100644
--- a/mogp_emulator/GaussianProcess.py
+++ b/mogp_emulator/GaussianProcess.py
@@ -503,7 +503,7 @@ def get_design_matrix(self, inputs):
             dm = np.ones((inputs.shape[0], 1))
         else:
             try:
-                dm = dmatrix(self._mean, data={"x": inputs.T})
+                dm = np.array(dmatrix(self._mean, data={"x": inputs.T}))
             except PatsyError:
                 raise ValueError("Provided mean function is invalid")
         if not dm.shape[0] == inputs.shape[0]:
@@ -658,6 +658,8 @@ def fit(self, theta):
 
         self.theta.mean = calc_mean_params(self.Ainv, self.Kinv_t,
                                            self._dm, self.priors.mean)
+
+        self.Kinv_t_mean = self.Kinv.solve(self.targets - np.dot(self._dm, self.theta.mean))
 
         if self.priors.mean.has_weak_priors:
             n_coeff = self.n - self.n_mean
@@ -874,7 +876,7 @@ def predict(self, testing, unc=True, deriv=False, include_nugget=True):
             mtest = np.dot(dmtest, self.theta.mean)
 
         Ktest = self.get_cov_matrix(testing)
 
-        mu = mtest + np.dot(Ktest.T, self.Kinv_t)
+        mu = mtest + np.dot(Ktest.T, self.Kinv_t_mean)
         var = None
         if unc:
diff --git a/mogp_emulator/MultiOutputGP.py b/mogp_emulator/MultiOutputGP.py
index da2cb5fd..cd3deb00 100644
--- a/mogp_emulator/MultiOutputGP.py
+++ b/mogp_emulator/MultiOutputGP.py
@@ -8,6 +8,7 @@
 )
 from mogp_emulator.Kernel import KernelBase
 from mogp_emulator.Priors import GPPriors
+from patsy import ModelDesc
 
 class MultiOutputGP(object):
     """Implementation of a multiple-output Gaussian Process Emulator.
@@ -70,6 +71,10 @@ def __init__(self, inputs, targets, mean=None, kernel="SquaredExponential", prio
 
         assert isinstance(mean, list), "mean must be None, a string, a valid patsy model description, or a list of None/string/mean functions"
         assert len(mean) == self.n_emulators
+
+        if any([isinstance(m, ModelDesc) for m in mean]):
+            warnings.warn("Specifying mean functions using a patsy ModelDesc does not support parallel " +
+                          "fitting and prediction with MultiOutputGPs")
 
         if isinstance(kernel, str) or issubclass(type(kernel), KernelBase):
             kernel = self.n_emulators*[kernel]
@@ -200,8 +205,11 @@ def predict(self, testing, unc=True, deriv=False, include_nugget=True,
             predict_method = _gp_predict_default_NaN
         else:
             predict_method = self.GPClass.predict
+
+        serial_predict = (platform.system() == "Windows" or
+                          any([isinstance(em._mean, ModelDesc) for em in self.emulators]))
 
-        if platform.system() == "Windows":
+        if serial_predict:
             predict_vals = [predict_method(gp, testing, unc, deriv, include_nugget)
                             for gp in self.emulators]
         else:
diff --git a/mogp_emulator/tests/test_GaussianProcess.py b/mogp_emulator/tests/test_GaussianProcess.py
index c6705e24..02a1e97d 100644
--- a/mogp_emulator/tests/test_GaussianProcess.py
+++ b/mogp_emulator/tests/test_GaussianProcess.py
@@ -500,8 +500,11 @@ def test_GaussianProcess_fit_logposterior(x, y, mean, nugget, sn):
 
     mean_expect = np.linalg.solve(A, np.dot(gp._dm.T, Kinv_t_expect))
 
+    Kinv_t_mean_expect = np.linalg.solve(K, y - np.dot(gp._dm, mean_expect))
+
     assert_allclose(L_expect, gp.Kinv.L)
     assert_allclose(Kinv_t_expect, gp.Kinv_t)
+    assert_allclose(Kinv_t_mean_expect, gp.Kinv_t_mean, atol=1.e-10)
     assert_allclose(LA_expect, gp.Ainv.L)
     assert_allclose(mean_expect, gp.theta.mean)
     assert_allclose(logpost_expect, gp.current_logpost)
@@ -807,7 +810,7 @@ def test_GaussianProcess_predict(x, y, dx):
     K = np.exp(theta[-1])*gp.kernel.kernel_f(x, x, theta[:-1])
     Ktest = np.exp(theta[-1])*gp.kernel.kernel_f(x_test, x, theta[:-1])
 
-    mu_expect = np.dot(Ktest, gp.Kinv_t)
+    mu_expect = np.dot(Ktest, gp.Kinv_t_mean)
     var_expect = np.exp(theta[-1]) - np.diag(np.dot(Ktest, np.linalg.solve(K, Ktest.T)))
 
     assert_allclose(mu, mu_expect)
@@ -853,13 +856,19 @@ def test_GaussianProcess_predict(x, y, dx):
 
     Ktest = np.exp(theta[-1])*gp.kernel.kernel_f(x_test, x, theta[:-1])
     R = dm_test.T - np.dot(gp._dm.T, np.linalg.solve(K, Ktest.T))
 
-    mu_expect = m + np.dot(Ktest, gp.Kinv_t)
+    mu_expect = m + np.dot(Ktest, gp.Kinv_t_mean)
     var_expect += np.diag(np.dot(R.T, np.linalg.solve(np.dot(gp._dm.T, np.linalg.solve(K, gp._dm)), R)))
 
     assert_allclose(mu, mu_expect)
     assert_allclose(var, var_expect)
 
+    # check that predictions at inputs are close to mean
+
+    mu, var, deriv = gp.predict(x)
+
+    assert_allclose(mu, y)
+
     # nonzero mean priors
 
     # gp = GaussianProcess(x, y, mean="x[0]",
diff --git a/setup.py b/setup.py
index b199e4ec..794eb127 100644
--- a/setup.py
+++ b/setup.py
@@ -10,8 +10,8 @@
 MAJOR = 0
 MINOR = 7
 MICRO = 0
-PRERELEASE = 0
-ISRELEASED = True
+PRERELEASE = 1
+ISRELEASED = False
 
 version = "{}.{}.{}".format(MAJOR, MINOR, MICRO)
 if not ISRELEASED: