From f534f123ab333a500e580e91def5fb06f77e5771 Mon Sep 17 00:00:00 2001
From: Apurba Bose <44209735+apbose@users.noreply.github.com>
Date: Tue, 30 Apr 2024 22:39:42 -0700
Subject: [PATCH] Rand converter - evaluator (#2580)

---
 .../dynamo/conversion/ops_evaluators.py      |  71 +++++++++
 tests/py/dynamo/conversion/harness.py        |  41 +++++-
 tests/py/dynamo/conversion/test_rand_aten.py | 135 ++++++++++++++++++
 3 files changed, 243 insertions(+), 4 deletions(-)
 create mode 100644 tests/py/dynamo/conversion/test_rand_aten.py

diff --git a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
index f83e0e5008..2ddc75bd0d 100644
--- a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
+++ b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
@@ -47,3 +47,74 @@ def aten_ops_arange_start_step(
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
     return np.arange(*args)
+
+
+def rand_validator(rand_node: Node) -> bool:
+    dtype = rand_node.kwargs.get("dtype", None)
+    layout = rand_node.kwargs.get("layout", None)
+    if dtype is not None:
+        _LOGGER.debug(
+            f"Currently we don't support specifying output dtype, got {dtype}."
+        )
+        return False
+    if layout is not None:
+        _LOGGER.debug(f"Currently we don't support specifying layout, got {layout}.")
+        return False
+    return True
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.rand.default, capability_validator=rand_validator
+)
+def aten_ops_rand(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return np.random.rand(*args[0])
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.randn.default, capability_validator=rand_validator
+)
+def aten_ops_randn(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return np.random.randn(*args[0])
+
+
+def randperm_validator(randperm_node: Node) -> bool:
+    dtype = randperm_node.kwargs.get("dtype", None)
+    layout = randperm_node.kwargs.get("layout", None)
+    input = randperm_node.args[0]
+    if not isinstance(input, int):
+        _LOGGER.error(f"Input should be of type int.")
+        return False
+    if dtype is not None:
+        _LOGGER.debug(
+            f"Currently we don't support specifying output dtype, got {dtype}."
+        )
+        return False
+    if layout is not None:
+        _LOGGER.debug(f"Currently we don't support specifying layout, got {layout}.")
+        return False
+    return True
+
+
+@dynamo_tensorrt_converter(
+    torch.ops.aten.randperm.default, capability_validator=randperm_validator
+)
+def aten_ops_randperm(
+    ctx: ConversionContext,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    return np.random.permutation(args[0])
diff --git a/tests/py/dynamo/conversion/harness.py b/tests/py/dynamo/conversion/harness.py
index 7ce3939371..d3d3580359 100644
--- a/tests/py/dynamo/conversion/harness.py
+++ b/tests/py/dynamo/conversion/harness.py
@@ -138,9 +138,7 @@ def run_test_custom_compare_results(
         if len(expected_ops):
             self.assert_has_op(mod, expected_ops)
 
-        interpreter_result = interpreter.run(
-            precision=torch.half if fp16_mode else torch.float
-        )
+        interpreter_result = interpreter.run()
         trt_mod = PythonTorchTensorRTModule(
             interpreter_result.engine,
             interpreter_result.input_names,
@@ -149,7 +147,6 @@ def run_test_custom_compare_results(
         res_trt = trt_mod(*cuda_inputs).cpu()
         res_cpu = mod(*cuda_inputs).cpu()
         assert len(res_trt) == len(res_cpu)
-        assert len(res_cpu) == len(comparators)
         for output_trt, output_cpu, comparator in zip(
             res_trt, res_cpu, comparators
         ):
@@ -270,6 +267,42 @@ def run_test(
             check_dtype,
         )
 
+    def run_test_compare_tensor_attributes_only(
+        self,
+        mod,
+        inputs,
+        expected_ops,
+        comparators: List[Tuple[Callable, List]],
+        precision=torch.float,
+        output_dtypes=None,
+        use_dynamo_tracer=False,
+        enable_passes=False,
+    ):
+        mod.eval()
+        mod = self.generate_graph(
+            mod,
+            inputs,
+            use_dynamo_tracer=use_dynamo_tracer,
+            enable_passes=enable_passes,
+        )
+        # Previous instance of the interpreter auto-casted 64-bit inputs
+        # We replicate this behavior here
+        compilation_settings = CompilationSettings(
+            enabled_precisions={dtype._from(precision)},
+            truncate_long_and_double=True,
+            debug=True,
+        )
+
+        interp = TRTInterpreter(
+            mod,
+            Input.from_tensors(inputs),
+            output_dtypes=output_dtypes,
+            compilation_settings=compilation_settings,
+        )
+        super().run_test_custom_compare_results(
+            mod, inputs, expected_ops, interp, comparators
+        )
+
     def run_test_with_dynamic_shape(
         self,
         mod,
diff --git a/tests/py/dynamo/conversion/test_rand_aten.py b/tests/py/dynamo/conversion/test_rand_aten.py
new file mode 100644
index 0000000000..4be44a90ec
--- /dev/null
+++ b/tests/py/dynamo/conversion/test_rand_aten.py
@@ -0,0 +1,135 @@
+import torch
+import torch.nn as nn
+import torch_tensorrt
+from parameterized import parameterized
+from torch.testing._internal.common_utils import TestCase, run_tests
+
+from .harness import DispatchTestCase
+
+rand_ops = [
+    (
+        "rand_one_dimension",
+        (lambda shape: torch.ops.aten.rand(shape)),
+        [1],
+    ),
+    (
+        "rand_two_dimension",
+        (lambda shape: torch.ops.aten.rand(shape)),
+        [1, 2],
+    ),
+    (
+        "rand_three_dimension",
+        (lambda shape: torch.ops.aten.rand(shape)),
+        [2, 3, 4],
+    ),
+    (
+        "randn_one_dimension",
+        (lambda shape: torch.ops.aten.randn(shape)),
+        [1],
+    ),
+    (
+        "randn_two_dimension",
+        (lambda shape: torch.ops.aten.randn(shape)),
+        [2, 3],
+    ),
+    (
+        "randn_three_dimension",
+        (lambda shape: torch.ops.aten.randn(shape)),
+        [2, 3, 4],
+    ),
+]
+
+
+rand_perm_ops = [
+    (
+        "randperm_one_case",
+        (lambda x: torch.ops.aten.randperm(x)),
+        [1],
+    ),
+    (
+        "randperm_two_case",
+        (lambda x: torch.ops.aten.randperm(x)),
+        [150],
+    ),
+    (
+        "randperm_three_case",
+        (lambda x: torch.ops.aten.randperm(x)),
+        [1500],
+    ),
+]
+
+
+class TestRandConverter(DispatchTestCase):
+    @parameterized.expand(
+        [
+            (
+                rand_op[0],
+                rand_op[1],
+                rand_op[2],
+            )
+            for rand_op in rand_ops
+        ]
+    )
+    def test_rand(self, name, op, shape_or_input):
+        class TestModule(nn.Module):
+            def __init__(self):
+                super().__init__()
+
+            def forward(self, x):
+                shape_or_input[0] = x.shape[0]
+                return op(shape_or_input)
+
+        rand_model = TestModule()
+
+        inputs = [torch.randint(1, 3, shape_or_input, dtype=torch.int32)]
+        comparator_shape = lambda x, y, check_dtype: x.shape == y.shape and (
+            x.dtype == y.dtype if check_dtype else True
+        )
+        expected_ops = []
+        self.run_test_compare_tensor_attributes_only(
+            rand_model,
+            inputs,
+            expected_ops,
+            [(comparator_shape, [True])],
+            use_dynamo_tracer=True,
+        )
+
+    @parameterized.expand(
+        [
+            (
+                rand_op[0],
+                rand_op[1],
+                rand_op[2],
+            )
+            for rand_op in rand_perm_ops
+        ]
+    )
+    def test_randperm(self, name, op, shape_or_input):
+        class TestModule(nn.Module):
+            def __init__(self):
+                super().__init__()
+
+            def forward(self, x):
+                shape_or_input[0] = x.shape[0]
+                return op(shape_or_input[0])
+
+        rand_model = TestModule()
+        # cannot use self.run_test() since it expects the input to be a tensor
+
+        inputs = [torch.randint(1, 3, shape_or_input, dtype=torch.int32)]
+        comparator_shape = lambda x, y, check_dtype: x.shape == y.shape and (
+            x.dtype == y.dtype if check_dtype else True
+        )
+        expected_ops = []
+        # TRT returns int32 while torch returns int64
+        self.run_test_compare_tensor_attributes_only(
+            rand_model,
+            inputs,
+            expected_ops,
+            [(comparator_shape, [False])],
+            use_dynamo_tracer=True,
+        )
+
+
+if __name__ == "__main__":
+    run_tests()
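
For context, a minimal end-to-end sketch of how the new evaluators are expected to be exercised (illustrative only, not part of the patch; the module name and shapes below are made up). Because aten.rand, aten.randn and aten.randperm are resolved with NumPy at conversion time, the sampled values end up frozen into the compiled engine as constants rather than re-sampled on every inference call:

# Illustrative sketch, not part of the patch. Assumes a CUDA-enabled build of
# torch_tensorrt with the dynamo frontend; module, shapes and compile flags are
# one possible configuration, not prescribed by this change.
import torch
import torch_tensorrt


class AddNoise(torch.nn.Module):
    def forward(self, x):
        # aten.rand.default is handled by aten_ops_rand above: the values are
        # drawn with np.random.rand during conversion and embedded in the
        # engine as a constant tensor.
        noise = torch.rand(4, 3, device=x.device)
        return x + noise


model = AddNoise().eval().cuda()
example = torch.randn(4, 3).cuda()

# Compile through the dynamo path so the converters registered in
# ops_evaluators.py are picked up.
trt_model = torch_tensorrt.compile(model, ir="dynamo", inputs=[example])

out = trt_model(example)
# The added noise is identical across calls, since it was sampled once at
# conversion time by the evaluator rather than at runtime.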