From f00057bf7122c066687f6cb4bb8d192fcf2a2514 Mon Sep 17 00:00:00 2001
From: Max Balandat
Date: Thu, 8 Jun 2023 13:41:16 -0700
Subject: [PATCH] Fix warning in `test_normalize` (#1876)

Summary:
Return the proper type of `indices` in `get_init_args()` for `Normalize`. Previously, this caused the following warning:

```
botorch/models/transforms/input.py:362: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  indices = torch.tensor(indices, dtype=torch.long)
```

Pull Request resolved: https://github.com/pytorch/botorch/pull/1876

Reviewed By: SebastianAment

Differential Revision: D46547569

Pulled By: Balandat

fbshipit-source-id: 6f7f3e15d5d80851e68e9e3b60575b807c8c24a4
---
 botorch/models/transforms/input.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/botorch/models/transforms/input.py b/botorch/models/transforms/input.py
index 41cbd257e5..add98df046 100644
--- a/botorch/models/transforms/input.py
+++ b/botorch/models/transforms/input.py
@@ -324,7 +324,7 @@ def __init__(
         d: int,
         coefficient: Tensor,
         offset: Tensor,
-        indices: Optional[List[int]] = None,
+        indices: Optional[Union[List[int], Tensor]] = None,
         batch_shape: torch.Size = torch.Size(),  # noqa: B008
         transform_on_train: bool = True,
         transform_on_eval: bool = True,
@@ -342,7 +342,8 @@
             offset: Tensor of offset coefficients, shape must to be broadcastable
                 with `(batch_shape x n x d)`-dim input tensors.
             indices: The indices of the inputs to transform. If omitted,
-                take all dimensions of the inputs into account.
+                take all dimensions of the inputs into account. Either a list of ints
+                or a Tensor of type `torch.long`.
             batch_shape: The batch shape of the inputs (assuming input tensors
                 of shape `batch_shape x n x d`). If provided, perform individual
                 transformation per batch, otherwise uses a single transformation.
@@ -359,7 +360,9 @@
         if (indices is not None) and (len(indices) == 0):
             raise ValueError("`indices` list is empty!")
         if (indices is not None) and (len(indices) > 0):
-            indices = torch.tensor(indices, dtype=torch.long)
+            indices = torch.as_tensor(
+                indices, dtype=torch.long, device=coefficient.device
+            )
             if len(indices) > d:
                 raise ValueError("Can provide at most `d` indices!")
             if (indices > d - 1).any():
@@ -498,7 +501,7 @@ class Normalize(AffineInputTransform):
     def __init__(
         self,
         d: int,
-        indices: Optional[List[int]] = None,
+        indices: Optional[Union[List[int], Tensor]] = None,
         bounds: Optional[Tensor] = None,
         batch_shape: torch.Size = torch.Size(),  # noqa: B008
         transform_on_train: bool = True,
@@ -626,7 +629,7 @@ class InputStandardize(AffineInputTransform):
     def __init__(
         self,
        self,
         d: int,
-        indices: Optional[List[int]] = None,
+        indices: Optional[Union[List[int], Tensor]] = None,
         batch_shape: torch.Size = torch.Size(),  # noqa: B008
         transform_on_train: bool = True,
         transform_on_eval: bool = True,
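
For readers wondering why swapping `torch.tensor` for `torch.as_tensor` silences the warning, here is a minimal sketch. It is illustrative only, not part of the patch; the variable names are made up and any recent PyTorch version is assumed.

```python
# Minimal sketch (illustrative only): why `torch.as_tensor` avoids the
# copy-construct UserWarning when `indices` is already a Tensor, e.g. the
# value round-tripped through `get_init_args()`.
import warnings

import torch

indices = torch.tensor([0, 2], dtype=torch.long)  # hypothetical indices Tensor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Old code path: torch.tensor() always copies, and warns when its input
    # is already a Tensor.
    _ = torch.tensor(indices, dtype=torch.long)
print(any("copy construct" in str(w.message) for w in caught))  # True

# New code path: torch.as_tensor() reuses the existing storage when the dtype
# and device already match, so there is no copy and no warning. It still
# accepts plain Python lists, so List[int] inputs keep working.
converted = torch.as_tensor(indices, dtype=torch.long)
print(converted.data_ptr() == indices.data_ptr())  # True -- same storage
```

Passing `device=coefficient.device`, as the patch does, additionally keeps user-supplied indices on the same device as the transform's coefficient buffer.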