pass X to OutcomeTransform #2663

Closed · wants to merge 1 commit
2 changes: 1 addition & 1 deletion botorch/acquisition/analytic.py
@@ -1116,7 +1116,7 @@ def _get_noiseless_fantasy_model(
     # Not transforming Yvar because 1e-7 is already close to 0 and it is a
     # relative, not absolute, value.
     Y_fantasized, _ = outcome_transform(
-        Y_fantasized.unsqueeze(-1), Yvar.unsqueeze(-1)
+        Y_fantasized.unsqueeze(-1), Yvar.unsqueeze(-1), X=batch_X_observed
     )
     Y_fantasized = Y_fantasized.squeeze(-1)
     input_transform = getattr(model, "input_transform", None)
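For context, this PR widens the `OutcomeTransform` call signature so that `forward`, `untransform`, and `untransform_posterior` all accept an optional `X`. A minimal sketch of the updated call convention, with toy tensors invented for illustration (`Standardize` is input-independent and simply ignores `X`):

```python
import torch
from botorch.models.transforms.outcome import Standardize

# Toy data for illustration only.
X = torch.rand(10, 2)   # `n x d` training inputs
Y = torch.randn(10, 1)  # `n x m` training targets

octf = Standardize(m=1)
# With this PR applied, the training inputs can be threaded through the
# transform; X-independent transforms such as Standardize accept and ignore X.
Y_tf, _ = octf(Y, X=X)
```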
4 changes: 2 additions & 2 deletions botorch/models/approximate_gp.py
@@ -172,7 +172,7 @@ def posterior(
 
         posterior = GPyTorchPosterior(distribution=dist)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             posterior = posterior_transform(posterior)
         return posterior
@@ -397,7 +397,7 @@ def __init__(
                 UserInputWarning,
                 stacklevel=3,
             )
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=transformed_X)
         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         validate_input_scaling(train_X=transformed_X, train_Y=train_Y)
         if train_Y.shape[-1] != num_outputs:
2 changes: 1 addition & 1 deletion botorch/models/ensemble.py
@@ -79,7 +79,7 @@ def posterior(
         # `posterior` (as is done in GP models). This is more general since it works
         # even if the transform doesn't support `untransform_posterior`.
         if hasattr(self, "outcome_transform"):
-            values, _ = self.outcome_transform.untransform(values)
+            values, _ = self.outcome_transform.untransform(values, X=X)
         if output_indices is not None:
             values = values[..., output_indices]
         posterior = EnsemblePosterior(values=values)
4 changes: 3 additions & 1 deletion botorch/models/fully_bayesian.py
@@ -373,7 +373,9 @@ def __init__(
             X=train_X, input_transform=input_transform
         )
         if outcome_transform is not None:
-            train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )
         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         validate_input_scaling(
             train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar
4 changes: 3 additions & 1 deletion botorch/models/fully_bayesian_multitask.py
@@ -242,7 +242,9 @@ def __init__(
         )
         if outcome_transform is not None:
             outcome_transform.train()  # Ensure we learn parameters here on init
-            train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )
             if train_Yvar is not None:  # Clamp after transforming
                 train_Yvar = train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL)
 
4 changes: 3 additions & 1 deletion botorch/models/gp_regression.py
@@ -160,7 +160,9 @@ def __init__(
             X=train_X, input_transform=input_transform
         )
         if outcome_transform is not None:
-            train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )
         # Validate again after applying the transforms
         self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar)
         ignore_X_dims = getattr(self, "_ignore_X_dims_scaling_check", None)
10 changes: 5 additions & 5 deletions botorch/models/gpytorch.py
@@ -198,7 +198,7 @@ def posterior(
                 mvn = self.likelihood(mvn, X)
         posterior = GPyTorchPosterior(distribution=mvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior
@@ -244,7 +244,7 @@ def condition_on_observations(
             # (unless we've already transformed if BatchedMultiOutputGPyTorchModel)
             if not isinstance(self, BatchedMultiOutputGPyTorchModel):
                 # `noise` is assumed to already be outcome-transformed.
-                Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar)
+                Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar, X=X)
         # Validate using strict=False, since we cannot tell if Y has an explicit
         # output dimension. Do not check shapes when fantasizing as they are
         # not expected to match.
@@ -467,7 +467,7 @@ def posterior(
 
         posterior = GPyTorchPosterior(distribution=mvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior
@@ -511,7 +511,7 @@ def condition_on_observations(
         if hasattr(self, "outcome_transform"):
             # We need to apply transforms before shifting batch indices around.
             # `noise` is assumed to already be outcome-transformed.
-            Y, _ = self.outcome_transform(Y)
+            Y, _ = self.outcome_transform(Y, X=X)
         # Do not check shapes when fantasizing as they are not expected to match.
         if fantasize_flag.off():
             self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)
@@ -924,7 +924,7 @@ def posterior(
         )
         posterior = GPyTorchPosterior(distribution=mtmvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior
14 changes: 7 additions & 7 deletions botorch/models/higher_order_gp.py
@@ -91,7 +91,7 @@ def _return_to_output_shape(self, tsr: Tensor) -> Tensor:
         return out
 
     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         Y = self._squeeze_to_single_output(Y)
         if Yvar is not None:
@@ -107,21 +107,21 @@ def forward(
         return Y_out, Yvar_out
 
     def untransform(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         Y = self._squeeze_to_single_output(Y)
         if Yvar is not None:
             Yvar = self._squeeze_to_single_output(Yvar)
 
-        Y, Yvar = super().untransform(Y, Yvar)
+        Y, Yvar = super().untransform(Y=Y, Yvar=Yvar, X=X)
 
         Y = self._return_to_output_shape(Y)
         if Yvar is not None:
             Yvar = self._return_to_output_shape(Yvar)
         return Y, Yvar
 
     def untransform_posterior(
-        self, posterior: HigherOrderGPPosterior
+        self, posterior: HigherOrderGPPosterior, X: Tensor | None = None
     ) -> TransformedPosterior:
         # TODO: return a HigherOrderGPPosterior once rescaling constant
         # muls * LinearOperators won't force a dense decomposition rather than a
@@ -227,7 +227,7 @@ def __init__(
                 output_shape=train_Y.shape[-num_output_dims:],
                 batch_shape=batch_shape,
             )
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=train_X)
 
         self._aug_batch_shape = batch_shape
         self._num_dimensions = num_output_dims + 1
@@ -416,7 +416,7 @@ def condition_on_observations(
         """
         if hasattr(self, "outcome_transform"):
             # we need to apply transforms before shifting batch indices around
-            Y, noise = self.outcome_transform(Y=Y, Yvar=noise)
+            Y, noise = self.outcome_transform(Y=Y, Yvar=noise, X=X)
         # Do not check shapes when fantasizing as they are not expected to match.
         if fantasize_flag.off():
             self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)
@@ -540,7 +540,7 @@ def posterior(
             num_outputs=self._num_outputs,
         )
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         return posterior
 
     def make_posterior_variances(
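The `FlattenedStandardize` signature changes above spell out the new contract: `forward`, `untransform`, and `untransform_posterior` each take an optional `X`. A hypothetical sketch of an input-dependent transform written against that contract; the class and its weighting scheme are invented for illustration and are not part of this PR:

```python
import torch
from torch import Tensor
from botorch.models.transforms.outcome import OutcomeTransform


class NormWeightedTransform(OutcomeTransform):
    """Hypothetical transform that scales outcomes by 1 / (1 + ||x||)."""

    def _weight(self, X: Tensor) -> Tensor:
        # Per-point weights of shape `batch_shape x n x 1`.
        return 1.0 / (1.0 + X.norm(dim=-1, keepdim=True))

    def forward(
        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
    ) -> tuple[Tensor, Tensor | None]:
        if X is None:
            raise ValueError("NormWeightedTransform requires X.")
        w = self._weight(X)
        return Y * w, (Yvar * w**2) if Yvar is not None else None

    def untransform(
        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
    ) -> tuple[Tensor, Tensor | None]:
        if X is None:
            raise ValueError("NormWeightedTransform requires X.")
        w = self._weight(X)
        return Y / w, (Yvar / w**2) if Yvar is not None else None
```

Because every call site touched by this PR now forwards `X`, a transform like this can participate in model construction, conditioning, and sampling without further plumbing.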
9 changes: 6 additions & 3 deletions botorch/models/latent_kronecker_gp.py
@@ -81,7 +81,7 @@ def __init__(
         self._use_min = use_min
 
     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Standardize outcomes.
 
@@ -93,6 +93,7 @@ def forward(
             Y: A `batch_shape x n x m`-dim tensor of training targets.
             Yvar: A `batch_shape x n x m`-dim tensor of observation noises
                 associated with the training targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable).
 
         Returns:
             A two-tuple with the transformed outcomes:
@@ -240,7 +241,9 @@ def __init__(
             outcome_transform = MinMaxStandardize(batch_shape=batch_shape)
         if outcome_transform is not None:
             # transform outputs once and keep the results
-            train_Y = outcome_transform(train_Y.unsqueeze(-1))[0].squeeze(-1)
+            train_Y = outcome_transform(train_Y.unsqueeze(-1), X=transformed_X)[
+                0
+            ].squeeze(-1)
 
         ExactGP.__init__(
             self,
@@ -506,7 +509,7 @@ def _rsample_from_base_samples(
         )
         # samples.shape = (*sample_shape, *broadcast_shape, n_test_x, n_t)
         if hasattr(self, "outcome_transform") and self.outcome_transform is not None:
-            samples, _ = self.outcome_transform.untransform(samples)
+            samples, _ = self.outcome_transform.untransform(samples, X=X)
         return samples
 
     def condition_on_observations(
2 changes: 1 addition & 1 deletion botorch/models/model_list_gp_regression.py
@@ -117,7 +117,7 @@ def condition_on_observations(
             else:
                 noise_i = torch.cat([noise[..., k] for k in range(i, j)], dim=-1)
             if hasattr(model, "outcome_transform"):
-                y_i, noise_i = model.outcome_transform(y_i, noise_i)
+                y_i, noise_i = model.outcome_transform(y_i, noise_i, X=X_i)
             if noise_i is not None:
                 noise_i = noise_i.squeeze(0)
             targets.append(y_i)
8 changes: 5 additions & 3 deletions botorch/models/multitask.py
@@ -219,7 +219,9 @@ def __init__(
         if outcome_transform == DEFAULT:
             outcome_transform = Standardize(m=1, batch_shape=train_X.shape[:-2])
         if outcome_transform is not None:
-            train_Y, train_Yvar = outcome_transform(Y=train_Y, Yvar=train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )
 
         # squeeze output dim
         train_Y = train_Y.squeeze(-1)
@@ -464,7 +466,7 @@ def __init__(
             X=train_X, input_transform=input_transform
         )
         if outcome_transform is not None:
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=transformed_X)
 
         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         self._num_outputs = train_Y.shape[-1]
@@ -772,7 +774,7 @@ def posterior(
         )
 
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         return posterior
 
     def train(self, val=True, *args, **kwargs):
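End to end, the change threads inputs through both construction (the transform is applied to `train_Y` together with `transformed_X`) and prediction (`untransform_posterior` receives the test `X`). A minimal usage sketch, assuming this PR is applied, with the X-independent `Standardize`:

```python
import torch
from botorch.models import SingleTaskGP
from botorch.models.transforms.outcome import Standardize

train_X = torch.rand(16, 2, dtype=torch.double)
train_Y = torch.sin(train_X).sum(dim=-1, keepdim=True)

# At construction, the model now applies the transform as
# `outcome_transform(Y=train_Y, Yvar=train_Yvar, X=transformed_X)`.
model = SingleTaskGP(train_X, train_Y, outcome_transform=Standardize(m=1))

# At prediction, `untransform_posterior(posterior, X=X)` receives the test inputs.
posterior = model.posterior(torch.rand(4, 2, dtype=torch.double))
print(posterior.mean.shape)  # torch.Size([4, 1])
```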