Commit aef25d7

Supporting InputTransforms in SparseOutlierLikelihood and `get_posterior_over_support` (#2659)

Summary:
Pull Request resolved: #2659

Adds support for using the `SparseOutlierLikelihood` in conjunction with input transforms. To do this, we need to pass the transformed inputs separately to the marginal likelihood computations, similar to the [model closures in the model-fitting routines](https://github.com/pytorch/botorch/blob/466da73a18731d45b034bfd36011bb3eb150fdd8/botorch/optim/closures/model_closures.py#L185).
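As a rough sketch of the pattern (not this PR's code): with a BoTorch model that carries an input transform, the model call receives the raw training inputs, while the explicitly transformed inputs are forwarded to the likelihood as an extra positional argument to the MLL. The names `X` and `TX` below mirror the diff; the toy model and data are illustrative only.

```python
import torch
from botorch.models import SingleTaskGP
from botorch.models.transforms.input import Normalize
from gpytorch.mlls import ExactMarginalLogLikelihood

# Toy data on an un-normalized domain; Normalize maps it to the unit cube.
X = 10 * torch.rand(20, 2, dtype=torch.double)
Y = torch.randn(20, 1, dtype=torch.double)
model = SingleTaskGP(X, Y, input_transform=Normalize(d=2))
mll = ExactMarginalLogLikelihood(model.likelihood, model)

mll.train()  # in train mode, model.train_inputs holds the raw (untransformed) inputs
X_train, Y_train = model.train_inputs[0], model.train_targets
F = model(X_train)  # the model applies the input transform internally
TX = model.transform_inputs(X_train)  # transform explicitly for the likelihood
loss = -mll(F, Y_train, TX)  # extra positional args are forwarded to the likelihood
```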

Reviewed By: saitcakmak

Differential Revision: D67605578

fbshipit-source-id: d37696aa238802001265fab7bd9402f59f2cf387
SebastianAment authored and facebook-github-bot committed Jan 7, 2025
1 parent 37f04d1 commit aef25d7
Showing 3 changed files with 26 additions and 2 deletions.
3 changes: 2 additions & 1 deletion botorch/models/likelihoods/sparse_outlier_noise.py
@@ -454,7 +454,8 @@ def _optimal_rhos(self, mll: ExactMarginalLogLikelihood) -> Tensor:
 mll.train()  # NOTE: this changes model.train_inputs to be unnormalized.
 X, Y = mll.model.train_inputs[0], mll.model.train_targets
 F = mll.model(X)
-L = mll.likelihood(F, X)
+TX = mll.model.transform_inputs(X)
+L = mll.likelihood(F, TX)  # likelihood expects transformed inputs
 S = L.covariance_matrix  # (Kernel Matrix + Noise Matrix)

 # NOTE: The following computation is mathematically equivalent to the formula
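For orientation, a hedged continuation of the toy sketch above (illustrative, not this PR's code): the likelihood call wraps the latent prior `F` into the marginal distribution over the observations, so its covariance is the kernel matrix plus the noise covariance, matching the comment in the hunk.

```python
# Continuing the toy sketch above; L and S mirror the names in the hunk.
L = model.likelihood(F, TX)  # marginal distribution over the observations
S = L.covariance_matrix      # kernel matrix + noise covariance (here 20 x 20)
```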
3 changes: 2 additions & 1 deletion botorch/models/relevance_pursuit.py
@@ -720,7 +720,8 @@ def log_prior(
 mll.train()
 X, Y = mll.model.train_inputs[0], mll.model.train_targets
 F = mll.model(X)
-mll_i = cast(Tensor, mll(F, Y, X))
+TX = mll.model.transform_inputs(X) if mll.model.training else X
+mll_i = cast(Tensor, mll(F, Y, TX))
 log_mll_trace.append(mll_i)
 support_size, log_prior_i = log_prior(
     model,
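The `if mll.model.training else X` guard reflects the convention noted in the first hunk's comment: `model.train_inputs` holds the raw, un-transformed inputs in train mode, but the already-transformed inputs in eval mode, so the transform must be applied only in the former case. A hedged illustration, reusing the toy model from the first sketch:

```python
# In train mode, train_inputs are the raw inputs, so we transform explicitly.
model.train()
assert torch.equal(model.train_inputs[0], X)
# In eval mode, train_inputs are already transformed (here, normalized to [0, 1]),
# so applying the transform a second time would be incorrect.
model.eval()
assert model.train_inputs[0].max() <= 1.0
```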
22 changes: 22 additions & 0 deletions test/models/test_relevance_pursuit.py
@@ -7,8 +7,10 @@
 from __future__ import annotations

 import itertools
+import warnings

 from functools import partial
+from unittest.mock import patch

 import gpytorch
 import torch
@@ -197,6 +199,26 @@ def _test_robust_gp_end_to_end(
 undetected_outliers = set(outlier_indices) - set(sparse_module.support)
 self.assertEqual(len(undetected_outliers), 0)

+with patch.object(
+    SparseOutlierNoise,
+    "forward",
+    wraps=sparse_module.forward,
+) as sparse_module_fwd:
+    # Testing that posterior inference on the training set does not throw
+    # warnings, which means that the passed inputs are equal to the cached ones.
+    with warnings.catch_warnings(record=True) as warnings_log:
+        map_model.posterior(X)
+    self.assertEqual(warnings_log, [])
+    # Testing that the noise module's forward receives transformed inputs.
+    X_in_call = sparse_module_fwd.call_args.kwargs["X"]
+    self.assertIsInstance(X_in_call, list)
+    self.assertEqual(len(X_in_call), 1)
+    X_in_call = X_in_call[0]
+    X_max = X_in_call.amax(dim=0)
+    X_min = X_in_call.amin(dim=0)
+    self.assertAllClose(X_max, torch.ones_like(X_max))
+    self.assertAllClose(X_min, torch.zeros_like(X_min))
+
 def test_robust_relevance_pursuit(self) -> None:
     for optimizer, convex_parameterization, dtype in itertools.product(
         [forward_relevance_pursuit, backward_relevance_pursuit],
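The new test relies on `unittest.mock.patch.object` with `wraps=` to spy on `SparseOutlierNoise.forward` without altering its behavior. A minimal, self-contained illustration of that pattern (the `Noise` class here is hypothetical, not BoTorch code):

```python
from unittest.mock import patch

class Noise:
    def forward(self, X):
        return X

noise = Noise()
# wraps= delegates to the real bound method while recording the call arguments.
with patch.object(Noise, "forward", wraps=noise.forward) as spy:
    result = noise.forward(X=[1.0])
assert result == [1.0]  # real behavior is preserved
assert spy.call_args.kwargs["X"] == [1.0]  # arguments are captured for assertions
```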
