From 9b0c63d8d42f414f6f62dd308d326c33fb181587 Mon Sep 17 00:00:00 2001
From: Sait Cakmak
Date: Tue, 17 Oct 2023 17:51:00 -0700
Subject: [PATCH] Deprecate FixedNoiseMultiFidelityGP (#2053)

Summary: Builds on https://github.com/pytorch/botorch/pull/2052 to deprecate
one more model that existed only to support a different likelihood.

Differential Revision: D50393050
---
 botorch/models/gp_regression_fidelity.py   | 126 +++------------------
 test/models/test_gp_regression_fidelity.py |  22 +++-
 2 files changed, 35 insertions(+), 113 deletions(-)

diff --git a/botorch/models/gp_regression_fidelity.py b/botorch/models/gp_regression_fidelity.py
index 2ff0a64cd5..d5e6b76ffe 100644
--- a/botorch/models/gp_regression_fidelity.py
+++ b/botorch/models/gp_regression_fidelity.py
@@ -31,7 +31,7 @@ import torch
 from botorch.exceptions.errors import UnsupportedError
-from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
+from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.kernels.downsampling import DownsamplingKernel
 from botorch.models.kernels.exponential_decay import ExponentialDecayKernel
 from botorch.models.kernels.linear_truncated_fidelity import (
@@ -67,6 +67,7 @@ def __init__(
         self,
         train_X: Tensor,
         train_Y: Tensor,
+        train_Yvar: Optional[Tensor] = None,
         iteration_fidelity: Optional[int] = None,
         data_fidelities: Optional[Union[List[int], Tuple[int]]] = None,
         data_fidelity: Optional[int] = None,
@@ -82,6 +83,8 @@ def __init__(
                 where `s` is the dimension of the fidelity parameters (either one
                 or two).
             train_Y: A `batch_shape x n x m` tensor of training observations.
+            train_Yvar: An optional `batch_shape x n x m` tensor of observed
+                measurement noise.
             iteration_fidelity: The column index for the training iteration fidelity
                 parameter (optional).
             data_fidelities: The column indices for the downsampling fidelity parameter.
@@ -142,17 +145,19 @@ def __init__(
         super().__init__(
             train_X=train_X,
             train_Y=train_Y,
+            train_Yvar=train_Yvar,
             likelihood=likelihood,
             covar_module=covar_module,
             outcome_transform=outcome_transform,
             input_transform=input_transform,
         )
         self._subset_batch_dict = {
-            "likelihood.noise_covar.raw_noise": -2,
             "mean_module.raw_constant": -1,
             "covar_module.raw_outputscale": -1,
             **subset_batch_dict,
         }
+        if train_Yvar is None:
+            self._subset_batch_dict["likelihood.noise_covar.raw_noise"] = -2
         self.to(train_X)
 
     @classmethod
@@ -173,27 +178,7 @@ def construct_inputs(
         return inputs
 
 
-class FixedNoiseMultiFidelityGP(FixedNoiseGP):
-    r"""A single task multi-fidelity GP model using fixed noise levels.
-
-    A FixedNoiseGP model analogue to SingleTaskMultiFidelityGP, using a
-    DownsamplingKernel for the data fidelity parameter (if present) and
-    an ExponentialDecayKernel for the iteration fidelity parameter (if present).
-
-    This kernel is described in [Wu2019mf]_.
-
-    Example:
-        >>> train_X = torch.rand(20, 4)
-        >>> train_Y = train_X.pow(2).sum(dim=-1, keepdim=True)
-        >>> train_Yvar = torch.full_like(train_Y) * 0.01
-        >>> model = FixedNoiseMultiFidelityGP(
-        >>>     train_X,
-        >>>     train_Y,
-        >>>     train_Yvar,
-        >>>     data_fidelities=[3],
-        >>> )
-    """
-
+class FixedNoiseMultiFidelityGP(SingleTaskMultiFidelityGP):
     def __init__(
         self,
         train_X: Tensor,
@@ -207,99 +192,24 @@ def __init__(
         train_Y: Tensor,
         train_Yvar: Tensor,
         iteration_fidelity: Optional[int] = None,
         data_fidelities: Optional[Union[List[int], Tuple[int]]] = None,
         data_fidelity: Optional[int] = None,
         linear_truncated: bool = True,
         nu: float = 2.5,
         outcome_transform: Optional[OutcomeTransform] = None,
         input_transform: Optional[InputTransform] = None,
     ) -> None:
-        r"""
-        Args:
-            train_X: A `batch_shape x n x (d + s)` tensor of training features,
-                where `s` is the dimension of the fidelity parameters (either one
-                or two).
-            train_Y: A `batch_shape x n x m` tensor of training observations.
-            train_Yvar: A `batch_shape x n x m` tensor of observed measurement noise.
-            iteration_fidelity: The column index for the training iteration fidelity
-                parameter (optional).
-            data_fidelities: The column indices for the downsampling fidelity parameter.
-                If a list of indices is provided, a kernel will be constructed for
-                each index (optional).
-            data_fidelity: The column index for the downsampling fidelity parameter
-                (optional). Deprecated in favor of `data_fidelities`.
-            linear_truncated: If True, use a `LinearTruncatedFidelityKernel` instead
-                of the default kernel.
-            nu: The smoothness parameter for the Matern kernel: either 1/2, 3/2, or
-                5/2. Only used when `linear_truncated=True`.
-            outcome_transform: An outcome transform that is applied to the
-                training data during instantiation and to the posterior during
-                inference (that is, the `Posterior` obtained by calling
-                `.posterior` on the model will be on the original scale).
-            input_transform: An input transform that is applied in the model's
-                forward pass.
-        """
-        if data_fidelity is not None:
-            warnings.warn(
-                "The `data_fidelity` argument is deprecated and will be removed in "
-                "a future release. Please use `data_fidelities` instead.",
-                DeprecationWarning,
-            )
-            if data_fidelities is not None:
-                raise ValueError(
-                    "Cannot specify both `data_fidelity` and `data_fidelities`."
-                )
-            data_fidelities = [data_fidelity]
-
-        self._init_args = {
-            "iteration_fidelity": iteration_fidelity,
-            "data_fidelities": data_fidelities,
-            "linear_truncated": linear_truncated,
-            "nu": nu,
-            "outcome_transform": outcome_transform,
-        }
-        if iteration_fidelity is None and data_fidelities is None:
-            raise UnsupportedError(
-                "FixedNoiseMultiFidelityGP requires at least one fidelity parameter."
-            )
-        with torch.no_grad():
-            transformed_X = self.transform_inputs(
-                X=train_X, input_transform=input_transform
-            )
-        self._set_dimensions(train_X=transformed_X, train_Y=train_Y)
-        covar_module, subset_batch_dict = _setup_multifidelity_covar_module(
-            dim=transformed_X.size(-1),
-            aug_batch_shape=self._aug_batch_shape,
-            iteration_fidelity=iteration_fidelity,
-            data_fidelities=data_fidelities,
-            linear_truncated=linear_truncated,
-            nu=nu,
+        r"""DEPRECATED: Use `SingleTaskMultiFidelityGP` instead."""
+        warnings.warn(
+            "`FixedNoiseMultiFidelityGP` has been deprecated. "
+            "Use `SingleTaskMultiFidelityGP` with `train_Yvar` instead.",
+            DeprecationWarning,
         )
         super().__init__(
             train_X=train_X,
             train_Y=train_Y,
             train_Yvar=train_Yvar,
-            covar_module=covar_module,
+            iteration_fidelity=iteration_fidelity,
+            data_fidelities=data_fidelities,
+            data_fidelity=data_fidelity,
+            linear_truncated=linear_truncated,
+            nu=nu,
             outcome_transform=outcome_transform,
             input_transform=input_transform,
         )
-        self._subset_batch_dict = {
-            "likelihood.noise_covar.raw_noise": -2,
-            "mean_module.raw_constant": -1,
-            "covar_module.raw_outputscale": -1,
-            **subset_batch_dict,
-        }
-        self.to(train_X)
-
-    @classmethod
-    def construct_inputs(
-        cls,
-        training_data: SupervisedDataset,
-        fidelity_features: List[int],
-        **kwargs,
-    ) -> Dict[str, Any]:
-        r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`.
-
-        Args:
-            training_data: Dictionary of `SupervisedDataset`.
-            fidelity_features: Column indices of fidelity features.
-        """
-        inputs = super().construct_inputs(training_data=training_data, **kwargs)
-        inputs["data_fidelities"] = fidelity_features
-        return inputs
 
 
 def _setup_multifidelity_covar_module(
diff --git a/test/models/test_gp_regression_fidelity.py b/test/models/test_gp_regression_fidelity.py
index 66b41d0d9c..42c511f6f4 100644
--- a/test/models/test_gp_regression_fidelity.py
+++ b/test/models/test_gp_regression_fidelity.py
@@ -12,7 +12,6 @@
 from botorch.exceptions.errors import UnsupportedError
 from botorch.exceptions.warnings import OptimizationWarning
 from botorch.fit import fit_gpytorch_mll
-from botorch.models.gp_regression import FixedNoiseGP
 from botorch.models.gp_regression_fidelity import (
     FixedNoiseMultiFidelityGP,
     SingleTaskMultiFidelityGP,
@@ -243,14 +242,14 @@ def test_condition_on_observations(self):
             )
             c_kwargs = (
                 {"noise": torch.full_like(Y_fant, 0.01)}
-                if isinstance(model, FixedNoiseGP)
+                if isinstance(model.likelihood, FixedNoiseGaussianLikelihood)
                 else {}
             )
             cm = model.condition_on_observations(X_fant, Y_fant, **c_kwargs)
             # fantasize at same input points
             c_kwargs_same_inputs = (
                 {"noise": torch.full_like(Y_fant[0], 0.01)}
-                if isinstance(model, FixedNoiseGP)
+                if isinstance(model.likelihood, FixedNoiseGaussianLikelihood)
                 else {}
             )
             cm_same_inputs = model.condition_on_observations(
@@ -309,7 +308,9 @@ def test_condition_on_observations(self):
                 )
                 c_kwargs = (
                     {"noise": torch.full_like(Y_fant[0, 0, :], 0.01)}
-                    if isinstance(model, FixedNoiseGP)
+                    if isinstance(
+                        model.likelihood, FixedNoiseGaussianLikelihood
+                    )
                     else {}
                 )
                 mnb = model_non_batch
@@ -435,6 +436,8 @@ def test_construct_inputs(self):
 
 
 class TestFixedNoiseMultiFidelityGP(TestSingleTaskMultiFidelityGP):
+    model_class = FixedNoiseMultiFidelityGP
+
     def _get_model_and_data(
         self,
         iteration_fidelity,
@@ -468,7 +471,11 @@ def _get_model_and_data(
             model_kwargs["outcome_transform"] = outcome_transform
         if input_transform is not None:
             model_kwargs["input_transform"] = input_transform
-        model = FixedNoiseMultiFidelityGP(**model_kwargs)
+        if self.model_class is FixedNoiseMultiFidelityGP:
+            with self.assertWarnsRegex(DeprecationWarning, "SingleTaskMultiFidelityGP"):
+                model = FixedNoiseMultiFidelityGP(**model_kwargs)
+        else:
+            model = self.model_class(**model_kwargs)
         return model, model_kwargs
 
     def test_init_error(self):
@@ -558,3 +565,8 @@ def test_construct_inputs(self):
         self.assertEqual(data_dict.get("data_fidelities", None), [1])
         self.assertTrue(kwargs["train_X"].equal(data_dict["train_X"]))
         self.assertTrue(kwargs["train_Y"].equal(data_dict["train_Y"]))
+
+
+class TestFixedNoiseSingleTaskMultiFidelityGP(TestFixedNoiseMultiFidelityGP):
+    # Test SingleTaskMultiFidelityGP with observed noise.
+    model_class = SingleTaskMultiFidelityGP
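
Migration example (a minimal sketch for downstream callers, reusing the shapes
from the removed docstring example above; note that the removed example's
`torch.full_like(train_Y)` call was missing its required fill value, which is
corrected to `torch.full_like(train_Y, 0.01)` here):

    import torch
    from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP

    # 20 training points: 3 design dimensions plus a data-fidelity column at index 3.
    train_X = torch.rand(20, 4)
    train_Y = train_X.pow(2).sum(dim=-1, keepdim=True)
    train_Yvar = torch.full_like(train_Y, 0.01)  # known observation noise

    # Deprecated path (now emits a DeprecationWarning):
    #     model = FixedNoiseMultiFidelityGP(
    #         train_X, train_Y, train_Yvar, data_fidelities=[3]
    #     )
    # Replacement: pass the observed noise directly via `train_Yvar`.
    model = SingleTaskMultiFidelityGP(
        train_X, train_Y, train_Yvar=train_Yvar, data_fidelities=[3]
    )

When `train_Yvar` is omitted, the model instead infers the noise level through
its likelihood, which is why the patch registers
`likelihood.noise_covar.raw_noise` in `_subset_batch_dict` only when
`train_Yvar` is None.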