diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
index bc01c52db9..2391a15ad1 100644
--- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
+++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
@@ -3041,9 +3041,175 @@ def aten_ops_pad(
     )
 
 
-@dynamo_tensorrt_converter(torch.ops.aten.upsample_nearest2d.default)
-@dynamo_tensorrt_converter(torch.ops.aten.upsample_nearest2d.vec)
-def upsample_nearest2d(
+for op in (
+    torch.ops.aten.upsample_nearest1d,
+    torch.ops.aten.upsample_nearest2d,
+    torch.ops.aten.upsample_nearest3d,
+    torch.ops.aten.upsample_linear1d,
+    torch.ops.aten.upsample_bilinear2d,
+    torch.ops.aten.upsample_trilinear3d,
+    torch.ops.aten.upsample_bicubic2d,
+):
+    for key in (
+        torch._C.DispatchKey.Autograd,
+        torch._C.DispatchKey.CompositeImplicitAutograd,
+    ):
+        if key in op.default.py_kernels:
+            del op.default.py_kernels[key]
+        if key in op.vec.py_kernels:
+            del op.vec.py_kernels[key]
+
+
+def upsample_compute_output_size(
+    input_size: torch.Size,
+    output_size: Optional[Sequence[int]],
+    scale_factors: Optional[Sequence[float]],
+) -> Sequence[int]:
+    spatial_dimensions = len(input_size) - 2
+
+    if output_size is not None:
+        torch._check(
+            scale_factors is None,
+            lambda: "Must specify exactly one of output_size and scale_factors",
+        )
+        torch._check(len(output_size) == spatial_dimensions)
+        return output_size
+
+    if scale_factors is not None:
+        torch._check(
+            output_size is None,
+            lambda: "Must specify exactly one of output_size and scale_factors",
+        )
+        torch._check(len(scale_factors) == spatial_dimensions)
+        output_size = []
+        for i, s in enumerate(scale_factors):
+            output_size.append(int(input_size[i + 2] * s))
+        return output_size
+
+    torch._check(
+        False, lambda: "Must specify exactly one of output_size and scale_factors"
+    )
+
+
+@torch.ops.aten.upsample_nearest1d.vec.py_impl(
+    torch._C.DispatchKey.CompositeImplicitAutograd
+)
+def upsample_nearest1d_vec(
+    input: torch.Tensor,
+    output_size: Optional[Sequence[int]],
+    scale_factors: Optional[Sequence[float]],
+) -> torch.Tensor:
+    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
+    if scale_factors is not None:
+        return torch.ops.aten.upsample_nearest1d.default(input, osize, *scale_factors)
+    return torch.ops.aten.upsample_nearest1d.default(input, osize)
+
+
+@torch.ops.aten.upsample_nearest2d.vec.py_impl(
+    torch._C.DispatchKey.CompositeImplicitAutograd
+)
+def upsample_nearest2d_vec(
+    input: torch.Tensor,
+    output_size: Optional[Sequence[int]],
+    scale_factors: Optional[Sequence[float]],
+) -> torch.Tensor:
+    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
+    if scale_factors is not None:
+        return torch.ops.aten.upsample_nearest2d.default(input, osize, *scale_factors)
+    return torch.ops.aten.upsample_nearest2d.default(input, osize)
+
+
+@torch.ops.aten.upsample_nearest3d.vec.py_impl(
+    torch._C.DispatchKey.CompositeImplicitAutograd
+)
+def upsample_nearest3d_vec(
+    input: torch.Tensor,
+    output_size: Optional[Sequence[int]],
+    scale_factors: Optional[Sequence[float]],
+) -> torch.Tensor:
+    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
+    if scale_factors is not None:
+        return torch.ops.aten.upsample_nearest3d.default(input, osize, *scale_factors)
+    return torch.ops.aten.upsample_nearest3d.default(input, osize)
+
+
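Reviewer note: `upsample_compute_output_size` mirrors PyTorch's reference behavior, truncating `size * scale` toward zero for each spatial dimension. A minimal illustration of that arithmetic (the concrete shapes here are illustrative, not taken from this PR):

```python
import torch

# Sketch of the helper's scale_factors branch: each spatial output size is
# int(input_size[i + 2] * scale), i.e. truncation rather than rounding.
input_size = torch.Size([1, 3, 4, 6])  # N, C, H, W
scale_factors = [2.0, 1.5]
osize = [int(input_size[i + 2] * s) for i, s in enumerate(scale_factors)]
assert osize == [8, 9]  # H: int(4 * 2.0), W: int(6 * 1.5)
```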
+@torch.ops.aten.upsample_linear1d.vec.py_impl(
+    torch._C.DispatchKey.CompositeImplicitAutograd
+)
+def upsample_linear1d_vec(
+    input: torch.Tensor,
+    output_size: Optional[Sequence[int]],
+    align_corners: bool,
+    scale_factors: Optional[Sequence[float]],
+) -> torch.Tensor:
+    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
+    if scale_factors is not None:
+        return torch.ops.aten.upsample_linear1d.default(
+            input, osize, align_corners, *scale_factors
+        )
+    return torch.ops.aten.upsample_linear1d.default(input, osize, align_corners)
+
+
+@torch.ops.aten.upsample_bilinear2d.vec.py_impl(
+    torch._C.DispatchKey.CompositeImplicitAutograd
+)
+def upsample_bilinear2d_vec(
+    input: torch.Tensor,
+    output_size: Optional[Sequence[int]],
+    align_corners: bool,
+    scale_factors: Optional[Sequence[float]],
+) -> torch.Tensor:
+    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
+    if scale_factors is not None:
+        return torch.ops.aten.upsample_bilinear2d.default(
+            input, osize, align_corners, *scale_factors
+        )
+    return torch.ops.aten.upsample_bilinear2d.default(input, osize, align_corners)
+
+
+@torch.ops.aten.upsample_trilinear3d.vec.py_impl(
+    torch._C.DispatchKey.CompositeImplicitAutograd
+)
+def upsample_trilinear3d_vec(
+    input: torch.Tensor,
+    output_size: Optional[Sequence[int]],
+    align_corners: bool,
+    scale_factors: Optional[Sequence[float]],
+) -> torch.Tensor:
+    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
+    if scale_factors is not None:
+        return torch.ops.aten.upsample_trilinear3d.default(
+            input, osize, align_corners, *scale_factors
+        )
+    return torch.ops.aten.upsample_trilinear3d.default(input, osize, align_corners)
+
+
+@torch.ops.aten.upsample_bicubic2d.vec.py_impl(
+    torch._C.DispatchKey.CompositeImplicitAutograd
+)
+def upsample_bicubic2d_vec(
+    input: torch.Tensor,
+    output_size: Optional[Sequence[int]],
+    align_corners: bool,
+    scale_factors: Optional[Sequence[float]],
+) -> torch.Tensor:
+    osize = upsample_compute_output_size(input.size(), output_size, scale_factors)
+    if scale_factors is not None:
+        return torch.ops.aten.upsample_bicubic2d.default(
+            input, osize, align_corners, *scale_factors
+        )
+    return torch.ops.aten.upsample_bicubic2d.default(input, osize, align_corners)
+
+
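Taken together with the dispatch-key deletions at the top of the hunk, these `py_impl` registrations rewrite every `.vec` overload into its `.default` counterpart during tracing, so only the `.default` overloads need TensorRT converters. A hedged sanity sketch of the intended effect (module and shapes are illustrative; the exact decomposition behavior depends on the PyTorch version):

```python
import torch

class Up(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.interpolate(
            x, scale_factor=2.0, mode="bilinear", align_corners=False
        )

# After this PR, the exported graph should carry upsample_bilinear2d.default
# rather than the .vec overload that F.interpolate normally dispatches to.
ep = torch.export.export(Up(), (torch.randn(1, 3, 8, 8),))
targets = {n.target for n in ep.graph.nodes if n.op == "call_function"}
assert torch.ops.aten.upsample_bilinear2d.default in targets
```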
+@dynamo_tensorrt_converter(
+    torch.ops.aten.upsample_nearest1d.default, supports_dynamic_shapes=True
+)
+@enforce_tensor_types(
+    {
+        0: (TRTTensor,),
+    }
+)
+def aten_ops_upsample_nearest1d(
     ctx: ConversionContext,
     target: Target,
     args: Tuple[Argument, ...],
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
     return impl.upsample.upsample(
         ctx,
@@ -3055,17 +3221,23 @@ def upsample_nearest2d(
         target,
         SourceIR.ATEN,
         name,
-        input=args[0],
-        out_shape=args_bounds_check(args, 1),
-        scale_factors=args_bounds_check(args, 2),
-        resize_mode="nearest",
+        args[0],
+        size=args[1],
+        scale_factor=None if len(args) < 3 else [args[2]],
+        mode="nearest",
         align_corners=False,
     )
 
 
-@dynamo_tensorrt_converter(torch.ops.aten.upsample_bilinear2d.default)
-@dynamo_tensorrt_converter(torch.ops.aten.upsample_bilinear2d.vec)
-def upsample_bilinear2d(
+@dynamo_tensorrt_converter(
+    torch.ops.aten.upsample_nearest2d.default, supports_dynamic_shapes=True
+)
+@enforce_tensor_types(
+    {
+        0: (TRTTensor,),
+    }
+)
+def aten_ops_upsample_nearest2d(
     ctx: ConversionContext,
     target: Target,
     args: Tuple[Argument, ...],
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
     return impl.upsample.upsample(
         ctx,
@@ -3077,11 +3249,151 @@ def upsample_bilinear2d(
         target,
         SourceIR.ATEN,
         name,
-        input=args[0],
-        out_shape=args_bounds_check(args, 1),
-        scale_factors=args_bounds_check(args, 3),
-        resize_mode="bilinear",
-        align_corners=args_bounds_check(args, 2),
+        args[0],
+        size=args[1],
+        scale_factor=None if len(args) < 4 else [args[2], args[3]],
+        mode="nearest",
+        align_corners=False,
+    )
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.upsample_nearest3d.default, supports_dynamic_shapes=True
+)
+@enforce_tensor_types(
+    {
+        0: (TRTTensor,),
+    }
+)
+def aten_ops_upsample_nearest3d(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return impl.upsample.upsample(
+        ctx,
+        target,
+        SourceIR.ATEN,
+        name,
+        args[0],
+        size=args[1],
+        scale_factor=None if len(args) < 5 else [args[2], args[3], args[4]],
+        mode="nearest",
+        align_corners=False,
+    )
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.upsample_linear1d.default, supports_dynamic_shapes=True
+)
+@enforce_tensor_types(
+    {
+        0: (TRTTensor,),
+    }
+)
+def aten_ops_upsample_linear1d(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return impl.upsample.upsample(
+        ctx,
+        target,
+        SourceIR.ATEN,
+        name,
+        args[0],
+        size=args[1],
+        scale_factor=None if len(args) < 4 else [args[3]],
+        mode="linear",
+        align_corners=args[2],
+    )
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.upsample_bilinear2d.default, supports_dynamic_shapes=True
+)
+@enforce_tensor_types(
+    {
+        0: (TRTTensor,),
+    }
+)
+def aten_ops_upsample_bilinear2d(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return impl.upsample.upsample(
+        ctx,
+        target,
+        SourceIR.ATEN,
+        name,
+        args[0],
+        size=args[1],
+        scale_factor=None if len(args) < 5 else [args[3], args[4]],
+        mode="bilinear",
+        align_corners=args[2],
+    )
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.upsample_trilinear3d.default, supports_dynamic_shapes=True
+)
+@enforce_tensor_types(
+    {
+        0: (TRTTensor,),
+    }
+)
+def aten_ops_upsample_trilinear3d(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return impl.upsample.upsample(
+        ctx,
+        target,
+        SourceIR.ATEN,
+        name,
+        args[0],
+        size=args[1],
+        scale_factor=None if len(args) < 6 else [args[3], args[4], args[5]],
+        mode="trilinear",
+        align_corners=args[2],
+    )
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.upsample_bicubic2d.default, supports_dynamic_shapes=True
+)
+@enforce_tensor_types(
+    {
+        0: (TRTTensor,),
+    }
+)
+def aten_ops_upsample_bicubic2d(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return impl.upsample.upsample(
+        ctx,
+        target,
+        SourceIR.ATEN,
+        name,
+        args[0],
+        size=args[1],
+        scale_factor=None if len(args) < 5 else [args[3], args[4]],
+        mode="bicubic",
+        align_corners=args[2],
     )
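The positional `args[...]` indexing in these converters follows the ATen operator schemas, where `args[1]` is always `output_size` and the trailing optional floats are per-axis scales (with `align_corners` at index 2 for the linear and cubic families). Printing a schema is a quick way to double-check the indices:

```python
import torch

# e.g. upsample_bilinear2d(Tensor self, SymInt[2] output_size,
#          bool align_corners, float? scales_h=None, float? scales_w=None)
print(torch.ops.aten.upsample_bilinear2d.default._schema)
# e.g. upsample_nearest3d(Tensor self, SymInt[3] output_size,
#          float? scales_d=None, float? scales_h=None, float? scales_w=None)
print(torch.ops.aten.upsample_nearest3d.default._schema)
```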
diff --git a/py/torch_tensorrt/dynamo/conversion/impl/upsample.py b/py/torch_tensorrt/dynamo/conversion/impl/upsample.py
index c61aad4290..5d1e281699 100644
--- a/py/torch_tensorrt/dynamo/conversion/impl/upsample.py
+++ b/py/torch_tensorrt/dynamo/conversion/impl/upsample.py
@@ -4,8 +4,12 @@
 from torch.fx.node import Target
 from torch_tensorrt.dynamo._SourceIR import SourceIR
 from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
-from torch_tensorrt.fx.converters.converter_utils import set_layer_name
-from torch_tensorrt.fx.types import TRTTensor
+from torch_tensorrt.dynamo.conversion.converter_utils import (
+    has_dynamic_shape,
+    set_layer_name,
+)
+from torch_tensorrt.dynamo.conversion.impl.shape import get_shape_with_dynamic_shape
+from torch_tensorrt.dynamo.types import TRTTensor
 
 
 def upsample(
@@ -14,54 +18,42 @@ def upsample(
     ctx: ConversionContext,
     target: Target,
     source_ir: Optional[SourceIR],
     name: str,
     input: TRTTensor,
-    out_shape: Optional[Sequence[int]],
-    scale_factors: Optional[Sequence[float]],
-    resize_mode: str,
+    size: Sequence[int],
+    scale_factor: Optional[Sequence[float]],
+    mode: str,
     align_corners: bool,
 ) -> TRTTensor:
-    resize_layer = ctx.net.add_resize(input)
-    # output size calculation
-    # Pytorch assumes that one of out_shape/scale_factor is None
-    # Pytorch assumes that dimensions match for out_shape/scale factor
-    if out_shape is not None:
-        resize_layer.shape = list(input.shape)[:2] + list(out_shape)
-    elif scale_factors is not None:
-        resize_layer.scales = [1.0, 1.0] + list(scale_factors)
-    else:
-        raise RuntimeError(
-            "At least one of out_shape and scale_factors should be specified."
-        )
+    layer = ctx.net.add_resize(input)
 
-    # interpolate mode
-    if resize_mode == "nearest" or None:
-        resize_layer.resize_mode = trt.InterpolationMode.NEAREST
-    elif resize_mode == "bilinear":
-        resize_layer.resize_mode = trt.InterpolationMode.LINEAR
-        if align_corners is None or not align_corners:
-            raise RuntimeError(
-                f"Interpolation works differently is align_corners is False for {resize_mode} mode in PyTorch and TensorRT."
-            )
+    if scale_factor is not None and all(s is not None for s in scale_factor):
+        layer.scales = [1.0, 1.0] + list(scale_factor)
     else:
-        raise RuntimeError(
-            f"Interpolation mode is {resize_mode} which is not supported by TensorRT."
-        )
-
-    if resize_mode == "nearest":
-        resize_layer.coordinate_transformation = (
-            trt.ResizeCoordinateTransformation.ASYMMETRIC
-        )
-    elif resize_mode == "bilinear":
-        # align corners
-        if align_corners is not None and align_corners:
-            resize_layer.coordinate_transformation = (
-                trt.ResizeCoordinateTransformation.ALIGN_CORNERS
+        shape = list(input.shape)[:2] + list(size)
+        if has_dynamic_shape(shape):
+            shape = get_shape_with_dynamic_shape(
+                ctx, target, source_ir, name, shape, input
             )
+            layer.set_input(1, shape)
         else:
-            resize_layer.coordinate_transformation = (
-                trt.ResizeCoordinateTransformation.ASYMMETRIC
-            )
+            layer.shape = shape
 
-    set_layer_name(resize_layer, target, name, source_ir)
+    if mode == "nearest":
+        layer.resize_mode = trt.InterpolationMode.NEAREST
+        layer.coordinate_transformation = trt.ResizeCoordinateTransformation.ASYMMETRIC
+    elif mode in ("linear", "bilinear", "trilinear"):
+        layer.resize_mode = trt.InterpolationMode.LINEAR
+        layer.coordinate_transformation = (
+            trt.ResizeCoordinateTransformation.ALIGN_CORNERS
+            if align_corners
+            else trt.ResizeCoordinateTransformation.HALF_PIXEL
+        )
+    elif mode == "bicubic":
+        layer.resize_mode = trt.InterpolationMode.CUBIC
+        layer.coordinate_transformation = (
+            trt.ResizeCoordinateTransformation.ALIGN_CORNERS
+            if align_corners
+            else trt.ResizeCoordinateTransformation.HALF_PIXEL
+        )
 
-    out = resize_layer.get_output(0)
-    return out
+    set_layer_name(layer, target, name, source_ir)
+    return layer.get_output(0)
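For context on the TensorRT side: a resize layer is configured either with static `scales` or with an output `shape`, which can also be supplied as a runtime tensor through `set_input(1, ...)` for dynamic shapes, plus an interpolation mode and a coordinate transformation (`HALF_PIXEL` matches PyTorch's `align_corners=False` sampling grid). A standalone sketch against the raw TensorRT Python API (network and tensor names are illustrative):

```python
import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
network = builder.create_network(
    1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
)
x = network.add_input("input", trt.float32, (1, 3, 8, 8))

# Static-scale path: leave N and C untouched, double H and W.
layer = network.add_resize(x)
layer.scales = [1.0, 1.0, 2.0, 2.0]
layer.resize_mode = trt.InterpolationMode.LINEAR
layer.coordinate_transformation = trt.ResizeCoordinateTransformation.HALF_PIXEL
network.mark_output(layer.get_output(0))
```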
diff --git a/py/torch_tensorrt/dynamo/lowering/_decomposition_groups.py b/py/torch_tensorrt/dynamo/lowering/_decomposition_groups.py
index 3ff8181de0..0a1688b295 100644
--- a/py/torch_tensorrt/dynamo/lowering/_decomposition_groups.py
+++ b/py/torch_tensorrt/dynamo/lowering/_decomposition_groups.py
@@ -151,8 +151,6 @@
     aten.unfold_backward,
     aten.unfold_copy,
     aten._unsafe_index,
-    aten.upsample_bilinear2d,
-    aten.upsample_bilinear2d.vec,
     aten.upsample_nearest2d_backward,
     aten.var,
     aten.var_mean,
diff --git a/tests/py/dynamo/conversion/test_upsample.py b/tests/py/dynamo/conversion/test_upsample.py
deleted file mode 100644
index 448b3afb84..0000000000
--- a/tests/py/dynamo/conversion/test_upsample.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import torch
-from parameterized import parameterized
-from torch.testing._internal.common_utils import run_tests
-
-from .harness import DispatchTestCase
-
-
-class TestUpsampleConverter(DispatchTestCase):
-    # test case for nearest upsample, using output_size, scale_factors is disabled here
-    @parameterized.expand(
-        [
-            ("upsample_nearest2d.vec_outshape_0", (2, 2), (4, 4)),
-            ("upsample_nearest2d.vec_outshape_1", (2, 2), (5, 5)),
-        ]
-    )
-    def test_upsample_nearest_output_shape(self, _, input_shape, output_shape):
-        class Upsample(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-
-            def forward(self, input):
-                return torch.ops.aten.upsample_nearest2d.vec(input, output_shape, None)
-
-        input = [torch.randn([1, 1] + list(input_shape))]
-        self.run_test(Upsample(), input)
-
-    # test case for nearest upsample, using scale_factors, output_size is disabled here
-    @parameterized.expand(
-        [
-            ("upsample_nearest2d.vec_scale_0", (2, 2), (2, 2)),
-            ("upsample_nearest2d.vec_scale_1", (2, 2), (1.5, 1.5)),
-        ]
-    )
-    def test_upsample_nearest_scale_factor(self, _, input_shape, scale_factor):
-        class Upsample(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-
-            def forward(self, input):
-                return torch.ops.aten.upsample_nearest2d.vec(input, None, scale_factor)
-
-        input = [torch.randn([1, 1] + list(input_shape))]
-        self.run_test(Upsample(), input)
-
-    # test case for bilinear upsample, using output_size, scale_factors is disabled here
-    @parameterized.expand(
-        [
-            ("upsample_bilinear2d.vec_outshape_0", (2, 2), (4, 4), True),
-            ("upsample_bilinear2d.vec_outshape_1", (2, 2), (5, 5), True),
-        ]
-    )
-    def test_upsample_bilinear_output_shape(
-        self, _, input_shape, output_shape, align_corners
-    ):
-        class Upsample(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-
-            def forward(self, input):
-                return torch.ops.aten.upsample_bilinear2d.vec(
-                    input,
-                    output_shape,
-                    align_corners,
-                    None,
-                )
-
-        input = [torch.randn([1, 1] + list(input_shape))]
-        self.run_test(Upsample(), input)
-
-    # test case for bilinear upsample, using scale_factors, output_shape is disabled here
-    @parameterized.expand(
-        [
-            ("upsample_bilinear2d.vec_scale_0", (2, 2), (2, 2), True),
-            ("upsample_bilinear2d.vec_scale_1", (2, 2), (1.5, 1.5), True),
-        ]
-    )
-    def test_upsample_bilinear_scale_factors(
-        self, _, input_shape, scale_factors, align_corners
-    ):
-        class Upsample(torch.nn.Module):
-            def __init__(self):
-                super().__init__()
-
-            def forward(self, input):
-                return torch.ops.aten.upsample_bilinear2d.vec(
-                    input,
-                    None,
-                    align_corners,
-                    scale_factors,
-                )
-
-        input = [torch.randn([1, 1] + list(input_shape))]
-        self.run_test(Upsample(), input)
-
-
-if __name__ == "__main__":
-    run_tests()
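Removing `aten.upsample_bilinear2d` (and its `.vec` variant) from the decomposition group is what keeps the op intact through lowering; otherwise it would be shredded into index/arithmetic ops before the new converter could claim it. A hedged local check (assuming the public `get_decompositions` helper):

```python
import torch
from torch_tensorrt.dynamo.lowering import get_decompositions

# The op must no longer appear in the decomposition table handed to dynamo,
# so the partitioner routes it to the TensorRT converter instead.
assert torch.ops.aten.upsample_bilinear2d.default not in get_decompositions()
```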
diff --git a/tests/py/dynamo/conversion/test_upsample_aten.py b/tests/py/dynamo/conversion/test_upsample_aten.py
new file mode 100644
index 0000000000..0e33838349
--- /dev/null
+++ b/tests/py/dynamo/conversion/test_upsample_aten.py
@@ -0,0 +1,309 @@
+import torch
+from parameterized import parameterized
+from torch.testing._internal.common_utils import run_tests
+from torch_tensorrt import Input
+
+from .harness import DispatchTestCase
+
+
+class TestUpsampleConverter(DispatchTestCase):
+    @parameterized.expand(
+        [
+            ([7], [3], None),
+            ([7], [10], 1.5),
+        ]
+    )
+    def test_nearest1d(self, input_size, output_size, scales):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_nearest1d.default(x, output_size, scales)
+
+        inputs = [torch.randn([1, 1] + input_size)]
+        self.run_test(TestModule(), inputs)
+
+    @parameterized.expand(
+        [
+            ([3], None),
+            ([13], 1.5),
+        ]
+    )
+    def test_nearest1d_dynamic_shape(self, output_size, scales):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_nearest1d.default(x, output_size, scales)
+
+        input_specs = [
+            Input(
+                min_shape=(1, 1, 1),
+                opt_shape=(5, 5, 5),
+                max_shape=(9, 9, 9),
+                dtype=torch.float32,
+            )
+        ]
+        self.run_test_with_dynamic_shape(TestModule(), input_specs)
+
+    @parameterized.expand(
+        [
+            ([7, 7], [3, 3], None, None),
+            ([7, 7], [3, 10], 0.5, 1.5),
+        ]
+    )
+    def test_nearest2d(self, input_size, output_size, scales_h, scales_w):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_nearest2d.default(
+                    x, output_size, scales_h, scales_w
+                )
+
+        inputs = [torch.randn([1, 1] + input_size)]
+        self.run_test(TestModule(), inputs)
+
+    @parameterized.expand(
+        [
+            ([3, 3], None, None),
+            ([4, 13], 0.5, 1.5),
+        ]
+    )
+    def test_nearest2d_dynamic_shape(self, output_size, scales_h, scales_w):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_nearest2d.default(
+                    x, output_size, scales_h, scales_w
+                )
+
+        input_specs = [
+            Input(
+                min_shape=(1, 1, 1, 1),
+                opt_shape=(5, 5, 5, 5),
+                max_shape=(9, 9, 9, 9),
+                dtype=torch.float32,
+            )
+        ]
+        self.run_test_with_dynamic_shape(TestModule(), input_specs)
+
+    @parameterized.expand(
+        [
+            ([7, 7, 7], [3, 3, 3], None, None, None),
+            ([7, 7, 7], [3, 7, 10], 0.5, 1.0, 1.5),
+        ]
+    )
+    def test_nearest3d(self, input_size, output_size, scales_d, scales_h, scales_w):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_nearest3d.default(
+                    x, output_size, scales_d, scales_h, scales_w
+                )
+
+        inputs = [torch.randn([1, 1] + input_size)]
+        self.run_test(TestModule(), inputs)
+
+    @parameterized.expand(
+        [
+            ([3, 3, 3], None, None, None),
+            ([4, 9, 13], 0.5, 1.0, 1.5),
+        ]
+    )
+    def test_nearest3d_dynamic_shape(self, output_size, scales_d, scales_h, scales_w):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_nearest3d.default(
+                    x, output_size, scales_d, scales_h, scales_w
+                )
+
+        input_specs = [
+            Input(
+                min_shape=(1, 1, 1, 1, 1),
+                opt_shape=(5, 5, 5, 5, 5),
+                max_shape=(9, 9, 9, 9, 9),
+                dtype=torch.float32,
+            )
+        ]
+        self.run_test_with_dynamic_shape(TestModule(), input_specs)
+
+    @parameterized.expand(
+        [
+            ([7], [3], True, None),
+            ([7], [3], False, None),
+            ([7], [10], True, 1.5),
+            ([7], [10], False, 1.5),
+        ]
+    )
+    def test_linear1d(self, input_size, output_size, align_corners, scales):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_linear1d.default(
+                    x, output_size, align_corners, scales
+                )
+
+        inputs = [torch.randn([1, 1] + input_size)]
+        self.run_test(TestModule(), inputs)
+
+    @parameterized.expand(
+        [
+            ([3], True, None),
+            ([3], False, None),
+            ([13], True, 1.5),
+            ([13], False, 1.5),
+        ]
+    )
+    def test_linear1d_dynamic_shape(self, output_size, align_corners, scales):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_linear1d.default(
+                    x, output_size, align_corners, scales
+                )
+
+        input_specs = [
+            Input(
+                min_shape=(1, 1, 1),
+                opt_shape=(5, 5, 5),
+                max_shape=(9, 9, 9),
+                dtype=torch.float32,
+            )
+        ]
+        self.run_test_with_dynamic_shape(TestModule(), input_specs)
+
+    @parameterized.expand(
+        [
+            ([7, 7], [3, 3], True, None, None),
+            ([7, 7], [3, 3], False, None, None),
+            ([7, 7], [3, 10], True, 0.5, 1.5),
+            ([7, 7], [3, 10], False, 0.5, 1.5),
+        ]
+    )
+    def test_bilinear2d(
+        self, input_size, output_size, align_corners, scales_h, scales_w
+    ):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_bilinear2d.default(
+                    x, output_size, align_corners, scales_h, scales_w
+                )
+
+        inputs = [torch.randn([1, 1] + input_size)]
+        self.run_test(TestModule(), inputs)
+
+    @parameterized.expand(
+        [
+            ([3, 3], True, None, None),
+            ([3, 3], False, None, None),
+            ([4, 13], True, 0.5, 1.5),
+            ([4, 13], False, 0.5, 1.5),
+        ]
+    )
+    def test_bilinear2d_dynamic_shape(
+        self, output_size, align_corners, scales_h, scales_w
+    ):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_bilinear2d.default(
+                    x, output_size, align_corners, scales_h, scales_w
+                )
+
+        input_specs = [
+            Input(
+                min_shape=(1, 1, 1, 1),
+                opt_shape=(5, 5, 5, 5),
+                max_shape=(9, 9, 9, 9),
+                dtype=torch.float32,
+            )
+        ]
+        self.run_test_with_dynamic_shape(TestModule(), input_specs)
+
+    @parameterized.expand(
+        [
+            ([7, 7, 7], [3, 3, 3], True, None, None, None),
+            ([7, 7, 7], [3, 3, 3], False, None, None, None),
+            ([7, 7, 7], [3, 7, 10], True, 0.5, 1.0, 1.5),
+            ([7, 7, 7], [3, 7, 10], False, 0.5, 1.0, 1.5),
+        ]
+    )
+    def test_trilinear3d(
+        self, input_size, output_size, align_corners, scales_d, scales_h, scales_w
+    ):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_trilinear3d.default(
+                    x, output_size, align_corners, scales_d, scales_h, scales_w
+                )
+
+        inputs = [torch.randn([1, 1] + input_size)]
+        self.run_test(TestModule(), inputs)
+
+    @parameterized.expand(
+        [
+            ([3, 3, 3], True, None, None, None),
+            ([3, 3, 3], False, None, None, None),
+            ([4, 9, 13], True, 0.5, 1.0, 1.5),
+            ([4, 9, 13], False, 0.5, 1.0, 1.5),
+        ]
+    )
+    def test_trilinear3d_dynamic_shape(
+        self, output_size, align_corners, scales_d, scales_h, scales_w
+    ):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_trilinear3d.default(
+                    x, output_size, align_corners, scales_d, scales_h, scales_w
+                )
+
+        input_specs = [
+            Input(
+                min_shape=(1, 1, 1, 1, 1),
+                opt_shape=(5, 5, 5, 5, 5),
+                max_shape=(9, 9, 9, 9, 9),
+                dtype=torch.float32,
+            )
+        ]
+        self.run_test_with_dynamic_shape(TestModule(), input_specs)
+
+    @parameterized.expand(
+        [
+            ([7, 7], [3, 3], True, None, None),
+            ([7, 7], [3, 3], False, None, None),
+            ([7, 7], [3, 10], True, 0.5, 1.5),
+            ([7, 7], [3, 10], False, 0.5, 1.5),
+        ]
+    )
+    def test_bicubic2d(
+        self, input_size, output_size, align_corners, scales_h, scales_w
+    ):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_bicubic2d.default(
+                    x, output_size, align_corners, scales_h, scales_w
+                )
+
+        inputs = [torch.randn([1, 1] + input_size)]
+        self.run_test(TestModule(), inputs)
+
+    @parameterized.expand(
+        [
+            ([3, 3], True, None, None),
+            ([3, 3], False, None, None),
+            ([4, 13], True, 0.5, 1.5),
+            ([4, 13], False, 0.5, 1.5),
+        ]
+    )
+    def test_bicubic2d_dynamic_shape(
+        self, output_size, align_corners, scales_h, scales_w
+    ):
+        class TestModule(torch.nn.Module):
+            def forward(self, x):
+                return torch.ops.aten.upsample_bicubic2d.default(
+                    x, output_size, align_corners, scales_h, scales_w
+                )
+
+        input_specs = [
+            Input(
+                min_shape=(1, 1, 1, 1),
+                opt_shape=(5, 5, 5, 5),
+                max_shape=(9, 9, 9, 9),
+                dtype=torch.float32,
+            )
+        ]
+        self.run_test_with_dynamic_shape(TestModule(), input_specs)
+
+
+if __name__ == "__main__":
+    run_tests()
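End to end, the converters are exercised through the normal dynamo path; a hedged smoke test along these lines (shapes, tolerances, and the `min_block_size` choice are illustrative) complements the unit tests above:

```python
import torch
import torch_tensorrt

class Up(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.interpolate(
            x, size=(14, 14), mode="bicubic", align_corners=True
        )

model = Up().eval().cuda()
x = torch.randn(1, 3, 7, 7).cuda()
trt_model = torch_tensorrt.compile(model, ir="dynamo", inputs=[x], min_block_size=1)
torch.testing.assert_close(trt_model(x), model(x), rtol=1e-3, atol=1e-3)
```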