Deprecate FixedNoiseMultiFidelityGP (pytorch#2053)
Summary:

Builds on pytorch#2052 to deprecate one more model that existed only to support a different likelihood.

Differential Revision: D50393050
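
As a quick illustration of the change this enables (a sketch, not part of the commit; the toy data and the choice of fidelity column are made up), observed noise now goes directly to SingleTaskMultiFidelityGP via `train_Yvar`:

    import torch
    from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP

    # Toy training data: 20 points in 4 columns, last column is the fidelity.
    train_X = torch.rand(20, 4)
    train_Y = train_X.pow(2).sum(dim=-1, keepdim=True)
    train_Yvar = torch.full_like(train_Y, 0.01)  # observed noise levels

    # Previously: FixedNoiseMultiFidelityGP(train_X, train_Y, train_Yvar, data_fidelities=[3])
    model = SingleTaskMultiFidelityGP(
        train_X=train_X,
        train_Y=train_Y,
        train_Yvar=train_Yvar,
        data_fidelities=[3],
    )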
saitcakmak authored and facebook-github-bot committed Oct 18, 2023
1 parent 2897345 commit 9b0c63d
Showing 2 changed files with 35 additions and 113 deletions.
botorch/models/gp_regression_fidelity.py (18 additions, 108 deletions)

@@ -31,7 +31,7 @@

 import torch
 from botorch.exceptions.errors import UnsupportedError
-from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
+from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.kernels.downsampling import DownsamplingKernel
 from botorch.models.kernels.exponential_decay import ExponentialDecayKernel
 from botorch.models.kernels.linear_truncated_fidelity import (
@@ -67,6 +67,7 @@ def __init__(
         self,
         train_X: Tensor,
         train_Y: Tensor,
+        train_Yvar: Optional[Tensor] = None,
         iteration_fidelity: Optional[int] = None,
         data_fidelities: Optional[Union[List[int], Tuple[int]]] = None,
         data_fidelity: Optional[int] = None,
@@ -82,6 +83,8 @@ def __init__(
                 where `s` is the dimension of the fidelity parameters (either one
                 or two).
             train_Y: A `batch_shape x n x m` tensor of training observations.
+            train_Yvar: An optional `batch_shape x n x m` tensor of observed
+                measurement noise.
             iteration_fidelity: The column index for the training iteration fidelity
                 parameter (optional).
             data_fidelities: The column indices for the downsampling fidelity parameter.
@@ -142,17 +145,19 @@ def __init__(
         super().__init__(
             train_X=train_X,
             train_Y=train_Y,
+            train_Yvar=train_Yvar,
             likelihood=likelihood,
             covar_module=covar_module,
             outcome_transform=outcome_transform,
             input_transform=input_transform,
         )
         self._subset_batch_dict = {
-            "likelihood.noise_covar.raw_noise": -2,
             "mean_module.raw_constant": -1,
             "covar_module.raw_outputscale": -1,
             **subset_batch_dict,
         }
+        if train_Yvar is None:
+            self._subset_batch_dict["likelihood.noise_covar.raw_noise"] = -2
         self.to(train_X)

     @classmethod
@@ -173,27 +178,7 @@ def construct_inputs(
         return inputs


-class FixedNoiseMultiFidelityGP(FixedNoiseGP):
-    r"""A single task multi-fidelity GP model using fixed noise levels.
-
-    A FixedNoiseGP model analogue to SingleTaskMultiFidelityGP, using a
-    DownsamplingKernel for the data fidelity parameter (if present) and
-    an ExponentialDecayKernel for the iteration fidelity parameter (if present).
-
-    This kernel is described in [Wu2019mf]_.
-
-    Example:
-        >>> train_X = torch.rand(20, 4)
-        >>> train_Y = train_X.pow(2).sum(dim=-1, keepdim=True)
-        >>> train_Yvar = torch.full_like(train_Y, 0.01)
-        >>> model = FixedNoiseMultiFidelityGP(
-        >>>     train_X,
-        >>>     train_Y,
-        >>>     train_Yvar,
-        >>>     data_fidelities=[3],
-        >>> )
-    """
-
+class FixedNoiseMultiFidelityGP(SingleTaskMultiFidelityGP):
     def __init__(
         self,
         train_X: Tensor,
@@ -207,99 +192,24 @@ def __init__(
         outcome_transform: Optional[OutcomeTransform] = None,
         input_transform: Optional[InputTransform] = None,
     ) -> None:
-        r"""
-        Args:
-            train_X: A `batch_shape x n x (d + s)` tensor of training features,
-                where `s` is the dimension of the fidelity parameters (either one
-                or two).
-            train_Y: A `batch_shape x n x m` tensor of training observations.
-            train_Yvar: A `batch_shape x n x m` tensor of observed measurement noise.
-            iteration_fidelity: The column index for the training iteration fidelity
-                parameter (optional).
-            data_fidelities: The column indices for the downsampling fidelity parameter.
-                If a list of indices is provided, a kernel will be constructed for
-                each index (optional).
-            data_fidelity: The column index for the downsampling fidelity parameter
-                (optional). Deprecated in favor of `data_fidelities`.
-            linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead
-                of the default kernel.
-            nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or
-                5/2. Only used when `linear_truncated=True`.
-            outcome_transform: An outcome transform that is applied to the
-                training data during instantiation and to the posterior during
-                inference (that is, the `Posterior` obtained by calling
-                `.posterior` on the model will be on the original scale).
-            input_transform: An input transform that is applied in the model's
-                forward pass.
-        """
-        if data_fidelity is not None:
-            warnings.warn(
-                "The `data_fidelity` argument is deprecated and will be removed in "
-                "a future release. Please use `data_fidelities` instead.",
-                DeprecationWarning,
-            )
-            if data_fidelities is not None:
-                raise ValueError(
-                    "Cannot specify both `data_fidelity` and `data_fidelities`."
-                )
-            data_fidelities = [data_fidelity]
-
-        self._init_args = {
-            "iteration_fidelity": iteration_fidelity,
-            "data_fidelities": data_fidelities,
-            "linear_truncated": linear_truncated,
-            "nu": nu,
-            "outcome_transform": outcome_transform,
-        }
-        if iteration_fidelity is None and data_fidelities is None:
-            raise UnsupportedError(
-                "FixedNoiseMultiFidelityGP requires at least one fidelity parameter."
-            )
-        with torch.no_grad():
-            transformed_X = self.transform_inputs(
-                X=train_X, input_transform=input_transform
-            )
-        self._set_dimensions(train_X=transformed_X, train_Y=train_Y)
-        covar_module, subset_batch_dict = _setup_multifidelity_covar_module(
-            dim=transformed_X.size(-1),
-            aug_batch_shape=self._aug_batch_shape,
-            iteration_fidelity=iteration_fidelity,
-            data_fidelities=data_fidelities,
-            linear_truncated=linear_truncated,
-            nu=nu,
-        )
+        r"""DEPRECATED: Use `SingleTaskMultiFidelityGP` instead."""
+        warnings.warn(
+            "`FixedNoiseMultiFidelityGP` has been deprecated. "
+            "Use `SingleTaskMultiFidelityGP` with `train_Yvar` instead.",
+            DeprecationWarning,
+        )
         super().__init__(
             train_X=train_X,
             train_Y=train_Y,
             train_Yvar=train_Yvar,
-            covar_module=covar_module,
+            iteration_fidelity=iteration_fidelity,
+            data_fidelities=data_fidelities,
+            data_fidelity=data_fidelity,
+            linear_truncated=linear_truncated,
+            nu=nu,
             outcome_transform=outcome_transform,
             input_transform=input_transform,
         )
-        self._subset_batch_dict = {
-            "likelihood.noise_covar.raw_noise": -2,
-            "mean_module.raw_constant": -1,
-            "covar_module.raw_outputscale": -1,
-            **subset_batch_dict,
-        }
-        self.to(train_X)
-
-    @classmethod
-    def construct_inputs(
-        cls,
-        training_data: SupervisedDataset,
-        fidelity_features: List[int],
-        **kwargs,
-    ) -> Dict[str, Any]:
-        r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`.
-
-        Args:
-            training_data: Dictionary of `SupervisedDataset`.
-            fidelity_features: Column indices of fidelity features.
-        """
-        inputs = super().construct_inputs(training_data=training_data, **kwargs)
-        inputs["data_fidelities"] = fidelity_features
-        return inputs


 def _setup_multifidelity_covar_module(
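
A quick sanity check of the deprecation path (a sketch, not part of the diff; the toy tensors are made up): the old constructor should now warn and return what is effectively a SingleTaskMultiFidelityGP.

    import warnings

    import torch
    from botorch.models.gp_regression_fidelity import (
        FixedNoiseMultiFidelityGP,
        SingleTaskMultiFidelityGP,
    )

    train_X = torch.rand(10, 3)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    train_Yvar = torch.full_like(train_Y, 0.01)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        model = FixedNoiseMultiFidelityGP(
            train_X, train_Y, train_Yvar, data_fidelities=[2]
        )

    # The deprecated class is now a thin subclass that forwards everything,
    # including the observed noise, to SingleTaskMultiFidelityGP.
    assert isinstance(model, SingleTaskMultiFidelityGP)
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)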
test/models/test_gp_regression_fidelity.py (17 additions, 5 deletions)

@@ -12,7 +12,6 @@
 from botorch.exceptions.errors import UnsupportedError
 from botorch.exceptions.warnings import OptimizationWarning
 from botorch.fit import fit_gpytorch_mll
-from botorch.models.gp_regression import FixedNoiseGP
 from botorch.models.gp_regression_fidelity import (
     FixedNoiseMultiFidelityGP,
     SingleTaskMultiFidelityGP,
@@ -243,14 +242,14 @@ def test_condition_on_observations(self):
             )
             c_kwargs = (
                 {"noise": torch.full_like(Y_fant, 0.01)}
-                if isinstance(model, FixedNoiseGP)
+                if isinstance(model.likelihood, FixedNoiseGaussianLikelihood)
                 else {}
             )
             cm = model.condition_on_observations(X_fant, Y_fant, **c_kwargs)
             # fantasize at the same input points
             c_kwargs_same_inputs = (
                 {"noise": torch.full_like(Y_fant[0], 0.01)}
-                if isinstance(model, FixedNoiseGP)
+                if isinstance(model.likelihood, FixedNoiseGaussianLikelihood)
                 else {}
             )
             cm_same_inputs = model.condition_on_observations(
@@ -309,7 +308,9 @@ def test_condition_on_observations(self):
                 )
                 c_kwargs = (
                     {"noise": torch.full_like(Y_fant[0, 0, :], 0.01)}
-                    if isinstance(model, FixedNoiseGP)
+                    if isinstance(
+                        model.likelihood, FixedNoiseGaussianLikelihood
+                    )
                     else {}
                 )
                 mnb = model_non_batch
@@ -435,6 +436,8 @@ def test_construct_inputs(self):


 class TestFixedNoiseMultiFidelityGP(TestSingleTaskMultiFidelityGP):
+    model_class = FixedNoiseMultiFidelityGP
+
     def _get_model_and_data(
         self,
         iteration_fidelity,
@@ -468,7 +471,11 @@ def _get_model_and_data(
             model_kwargs["outcome_transform"] = outcome_transform
         if input_transform is not None:
             model_kwargs["input_transform"] = input_transform
-        model = FixedNoiseMultiFidelityGP(**model_kwargs)
+        if self.model_class is FixedNoiseMultiFidelityGP:
+            with self.assertWarnsRegex(DeprecationWarning, "SingleTaskMultiFidelityGP"):
+                model = FixedNoiseMultiFidelityGP(**model_kwargs)
+        else:
+            model = self.model_class(**model_kwargs)
         return model, model_kwargs

     def test_init_error(self):
@@ -558,3 +565,8 @@ def test_construct_inputs(self):
         self.assertEqual(data_dict.get("data_fidelities", None), [1])
         self.assertTrue(kwargs["train_X"].equal(data_dict["train_X"]))
         self.assertTrue(kwargs["train_Y"].equal(data_dict["train_Y"]))
+
+
+class TestFixedNoiseSingleTaskMultiFidelityGP(TestFixedNoiseMultiFidelityGP):
+    # Test SingleTaskMultiFidelityGP with observed noise.
+    model_class = SingleTaskMultiFidelityGP
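
The tests now discriminate on the likelihood rather than the model class because, after this change, fixed-noise and inferred-noise models share the same class and differ only in their likelihood. A small illustration of that assumption (a sketch with made-up data):

    import torch
    from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
    from gpytorch.likelihoods import (
        FixedNoiseGaussianLikelihood,
        GaussianLikelihood,
    )

    train_X = torch.rand(10, 3)
    train_Y = train_X.sum(dim=-1, keepdim=True)

    # Without `train_Yvar`, noise is inferred through a GaussianLikelihood.
    inferred = SingleTaskMultiFidelityGP(train_X, train_Y, data_fidelities=[2])
    # With `train_Yvar`, the model uses a FixedNoiseGaussianLikelihood.
    observed = SingleTaskMultiFidelityGP(
        train_X,
        train_Y,
        train_Yvar=torch.full_like(train_Y, 0.01),
        data_fidelities=[2],
    )

    assert isinstance(inferred.likelihood, GaussianLikelihood)
    assert isinstance(observed.likelihood, FixedNoiseGaussianLikelihood)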
