Commit
Changes done internally at Facebook (#1172)
b18ad449f9cbcad8d9e13c74c7605dc4dcca53bc Jason Park <jasonjk@fb.com> AccOpGraph and pattern matcher from GraphModule
0888aecb9f5a6add2d855300f574592587ee8484 Jason Park <jasonjk@fb.com> Dependency checker
e935e26f2ace98e9ef251ad8328d6a07ddca828f Jason Park <jasonjk@fb.com> Sort topologically.
d6b5a5187cd22a7a7340be7bf021edbaf178b981 Jason Park <jasonjk@fb.com> Grouped op fusion for layer norm
c41054bec9cf617248d08363e15992bc24eb0ce0 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.einsum
9f9505f320c87e9069658349fcc6040d8fdc77a2 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.elu
fec2cab3bb76c22f0eb660d6dd9a4f3ae5523cfd Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.embedding
1a07ef201654adb752160e8e47e2f8369abe142a Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.eq
7f110836cd4e36c04954591698aac4addac8fcb9 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.tanh
c1b61c387833adcc602036605886a05df1d18152 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.size
3b3ae7efd15a599a2ef48e98ff2c0b7c1303d187 Kefei Lu <kefeilu@fb.com> fx2trt: remove some comments
bc4724e645d10dd75b47cef084a680f5eaa9d0bf Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.gelu
252a70960be30e037bee2d712ff286743015df66 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.getitem
80039709fde7cdcdf81abdbfd2199adfe9df215b Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.gt
d6c9202ad3c1d9f35884cd189edad289e36a6787 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.hardsigmoid
6b1b7ba4690c8d5724ff6784f2f56a7cafad2c78 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.hardtanh
37747aec5f7ec8d1e1552f9280dae41910c966ba Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.isinf
6eb5891faae2e95ac7b6918f5a88b3a0a84a5140 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.leaky_relu
df212a34c238cd60a6e13f508400468cec539961 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.linear
8dc800208a7482d4b89a37526d37cdd0934a2402 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.logical_and
edfd5e9f6336700ebc91e907a9ee207d8fe25336 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.logical_or
588f7801308ee2129cb67276fe40e00f9078af1d Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.xor
cba566389cc4b310dbccceb9dd265ada4aded201 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.lt
67497e1c99ef796e5ad88b3ecf5751d596877932 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.max
c91cc6dda5221e55b2e960a6cd546f551d01cc7b Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.maximum
fd95c60b850bff8e00ba844da38c5d04c8bc25ba Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.maxpool
e7a00066f73231c466f79f15d7f48b917c855104 Shreyansh Prajapati <shreyanshp@fb.com> Test dynamic shape support for acc_ops.min
5f9fcfd27a4d1f7d6a8f034ea2b7c6c5c8a51809 Jason Park <jasonjk@fb.com> grouped swish LN
249ce82c30f0d70dd6de952c4e85ce273b09cd45 Jason Park <jasonjk@fb.com> Handling different eps for layer norms.
48907b6557d57569545d926b94f96c0d58ae2fcb Kefei Lu <kefeilu@fb.com> fx2trt: log input specs
Wei authored Jul 11, 2022
1 parent 5be9af2 commit 0738caa
Showing 41 changed files with 1,275 additions and 32 deletions.
2 changes: 1 addition & 1 deletion examples/fx/quantized_resnet_test.py
@@ -49,7 +49,7 @@ def build_int8_trt(rn18):
# uncomment to check per channel quant works
weight=torch.quantization.default_per_channel_weight_observer,
)
-prepared = prepare_fx(rn18, {"": qconfig})
+prepared = prepare_fx(rn18, {"": qconfig}, data)
for _ in range(10):
prepared(data)
quantized_rn18 = convert_to_reference(prepared)
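The new data argument matches the example-inputs parameter that FX graph-mode quantization's prepare_fx gained in newer PyTorch releases, letting observers run with real shapes during calibration. A minimal sketch of the updated call pattern, mirroring the hunk above (the import path, and whether the example inputs must be wrapped in a tuple, depend on the PyTorch version and are assumptions here):

import torch
from torch.ao.quantization.quantize_fx import prepare_fx

data = torch.randn(1, 3, 224, 224)  # example input used for calibration
prepared = prepare_fx(rn18, {"": qconfig}, data)  # example inputs now passed in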
5 changes: 4 additions & 1 deletion py/torch_tensorrt/fx/lower.py
@@ -104,10 +104,13 @@ def __call__(self, mod, input, split_name) -> TRTInterpreterResult:
),
self.lower_setting.opt_profile_replica,
)
-if self.lower_setting.explicit_batch_dimension and self.lower_setting.dynamic_batch
+if self.lower_setting.explicit_batch_dimension
+and self.lower_setting.dynamic_batch
else InputTensorSpec.from_tensors(input)
)
)
+logger.info(f"{split_name=} {input_specs_val=}")

# Prepare algorithm selector and timing_cache for TRTInterpreter
algo_selector = None
if self.lower_setting.algo_selector:
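A minimal sketch of the branch this hunk rewires, with a hypothetical helper name (build_input_specs) and an assumed constructor name for the dynamic-size variant, since only the tail of that call is visible above:

def build_input_specs(lower_setting, inputs):
    # Dynamic batch only makes sense in TRT explicit-batch mode, so both
    # flags must be set to take the dynamic-size path.
    if lower_setting.explicit_batch_dimension and lower_setting.dynamic_batch:
        return InputTensorSpec.from_tensors_with_dynamic_size(  # assumed name
            inputs, lower_setting.opt_profile_replica
        )
    # Otherwise input specs are read directly off the sample tensors.
    return InputTensorSpec.from_tensors(inputs)

The added logger.info line then records the split name and the resolved specs together, which makes per-submodule lowering failures easier to trace.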
6 changes: 1 addition & 5 deletions py/torch_tensorrt/fx/lower_setting.py
@@ -64,11 +64,7 @@ class LowerSetting(LowerSettingBasic):
cache file is provided.
cuda_graph_batch_size (int): Cuda graph batch size, default to be -1.
preset_lowerer (str): when specified, use a preset logic to build the
-instance of Lowerer. Refer to
-`caffe2.torch.fb.model_transform.fx2trt.presets.LowererPresetsManager` on
-how presets are applied. Refer to
-`caffe2.torch.fb.model_transform.fx2trt.presets.ESUHMLowererPreset` on how
-to add a preset.
+instance of Lowerer.
opt_profile_replica (int): the number of opt profile set for TensorRT engine, this field is
only used by explicit batch dim with dynamic shape mode.
dynamic_batch: enable the dynamic shape in TRT with dim=-1 for the 1st dimension.
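A hypothetical construction sketch tying the documented fields together (the field names come from this docstring and the lower.py hunk above; every other default is assumed):

from torch_tensorrt.fx.lower_setting import LowerSetting

setting = LowerSetting(
    explicit_batch_dimension=True,  # dynamic shape requires explicit batch dim
    dynamic_batch=True,             # first dimension lowered as -1 in TRT
    opt_profile_replica=1,          # number of opt profile sets for the engine
)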
7 changes: 5 additions & 2 deletions py/torch_tensorrt/fx/passes/pass_utils.py
@@ -63,14 +63,17 @@ def pass_with_validation(
y = y.cpu()
accuracy_check = torch.allclose(x, y, **kwargs)
if not accuracy_check:
+_LOGGER.error(
+    f"Pass {pass_} failed correctness check, get original model output as {x} and processed model output as {y} for output {kk}."
+)
if suppress_accuracy_check_failure:
_LOGGER.error(
f"pass {pass_} failed correctness check due to output {kk}, escape current pass."
f"Pass {pass_} failed correctness check due to output {kk}."
)
return processed_module
else:
raise RuntimeError(
f"pass {pass_} failed correctness check due to output {kk}"
f"Pass {pass_} failed correctness check due to output {kk}"
)
return processed_module

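Since the check forwards **kwargs straight into torch.allclose, individual passes can loosen the comparison with rtol/atol. For example:

import torch

x = torch.tensor([1.0000, 2.0000])
y = torch.tensor([1.0001, 2.0002])
torch.allclose(x, y)             # False under the defaults (rtol=1e-5, atol=1e-8)
torch.allclose(x, y, rtol=1e-3)  # True once the relative tolerance is loosened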
18 changes: 18 additions & 0 deletions py/torch_tensorrt/fx/test/converters/acc_op/test_dequantize.py
@@ -45,6 +45,24 @@ def forward(self, x):
TestModule(), input_specs, expected_ops={acc_ops.dequantize}
)

def test_dequantize_with_dynamic_shape_four_dimensions(self):
class TestModule(nn.Module):
def forward(self, x):
x = torch.quantize_per_tensor(x, 1, 0, torch.quint8)
return x.dequantize()

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))],
),
]

self.run_test_with_dynamic_shape(
TestModule(), input_specs, expected_ops={acc_ops.dequantize}
)


if __name__ == "__main__":
run_tests()
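Each shape_ranges entry in these dynamic-shape tests is a (min, opt, max) triple that maps onto a TensorRT optimization profile: the engine must accept any shape between min and max and is tuned for opt. That reading is standard TensorRT behavior rather than something stated in this diff; annotated, the spec above is:

InputTensorSpec(
    shape=(-1, -1, -1, -1),  # all four dimensions are dynamic
    dtype=torch.float32,
    shape_ranges=[
        (
            (1, 1, 1, 1),  # min: smallest shape the engine must accept
            (1, 2, 3, 3),  # opt: shape TensorRT tunes kernels for
            (3, 3, 3, 3),  # max: largest shape the engine must accept
        )
    ],
)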
33 changes: 32 additions & 1 deletion py/torch_tensorrt/fx/test/converters/acc_op/test_einsum.py
@@ -3,7 +3,7 @@
import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
-from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase
+from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec


class TestConverter(AccTestCase):
@@ -30,6 +30,37 @@ def forward(self, x, y):
test_implicit_batch_dim=False,
)

@parameterized.expand(
[
("4d_dim", "bcwd,bcdh->bcwh", (2, 3, 4, 5), (2, 3, 5, 6)),
("4d_dim_ext", "bcxd,bcyd->bcxy", (2, 3, 4, 5), (2, 3, 6, 5)),
# TRT does not support ellipsis or diagonal operations
]
)
def test_einsum_with_dynamic_shape_four_dimensions(
self, _, equation, x_size, y_size
):
class Einsum(nn.Module):
def forward(self, x, y):
return torch.einsum(equation, x, y)

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 3, 3), (1, 2, 3, 3), (3, 3, 3, 3))],
),
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 3, 3), (1, 2, 3, 3), (3, 3, 3, 3))],
),
]

self.run_test_with_dynamic_shape(
Einsum(), input_specs, expected_ops={acc_ops.einsum}
)


if __name__ == "__main__":
run_tests()
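For reference, the first parameterized equation is a batched matrix multiply over the last two dimensions, which is easy to confirm in plain PyTorch:

import torch

x = torch.randn(2, 3, 4, 5)
y = torch.randn(2, 3, 5, 6)
# "bcwd,bcdh->bcwh" contracts over d, matching a batched matmul
out = torch.einsum("bcwd,bcdh->bcwh", x, y)
assert out.shape == (2, 3, 4, 6)
assert torch.allclose(out, torch.matmul(x, y), atol=1e-5)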
17 changes: 17 additions & 0 deletions py/torch_tensorrt/fx/test/converters/acc_op/test_elu.py
@@ -30,6 +30,23 @@ def forward(self, x):
TestModule(), input_specs, expected_ops={acc_ops.elu}
)

def test_elu_with_dynamic_shape_four_dimensions(self):
class TestModule(nn.Module):
def forward(self, x):
return nn.functional.elu(x)

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 5), (3, 3, 3, 5))],
),
]

self.run_test_with_dynamic_shape(
TestModule(), input_specs, expected_ops={acc_ops.elu}
)


if __name__ == "__main__":
run_tests()
42 changes: 41 additions & 1 deletion py/torch_tensorrt/fx/test/converters/acc_op/test_embedding.py
@@ -5,7 +5,7 @@
import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
from parameterized import param, parameterized
from torch.testing._internal.common_utils import run_tests
-from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase
+from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec


@unittest.skip(
@@ -62,6 +62,46 @@ def forward(self, indices, weights):
test_explicit_batch_dim=True,
)

def test_embedding_with_dynamic_shape_four_dimensions(
self,
test_name,
indices_tensor,
weights_tensor,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
):
class TestEmbedding(torch.nn.Module):
def forward(self, indices, weights):
return torch.nn.functional.embedding(
input=indices,
weight=weights,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse,
)

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))],
),
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))],
),
]

self.run_test_with_dynamic_shape(
TestEmbedding(), input_specs, expected_ops={acc_ops.embedding}
)


if __name__ == "__main__":
run_tests()
44 changes: 43 additions & 1 deletion py/torch_tensorrt/fx/test/converters/acc_op/test_eq.py
@@ -2,7 +2,7 @@
import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
-from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase
+from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec


class TestEqConverter(AccTestCase):
@@ -184,6 +184,28 @@ def forward(self, x, y):
)


class TestEqOperatorSimpleConverterWithDynamicShape(AccTestCase):
def test_eq(self):
class Eq(torch.nn.Module):
def forward(self, x, y):
return x == y

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))],
),
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))],
),
]

self.run_test_with_dynamic_shape(Eq(), input_specs, expected_ops={acc_ops.eq})


class TestEqOperatorConstantConverter(AccTestCase):
@parameterized.expand(
[
@@ -243,5 +265,25 @@ def forward(self, x):
)


class TestConstInputConverterWithDynamicShape(AccTestCase):
def test_eq(self):
class Eq(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, x):
return x.shape[0] == 4

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))],
),
]

self.run_test_with_dynamic_shape(Eq(), input_specs, expected_ops={acc_ops.eq})


if __name__ == "__main__":
run_tests()
17 changes: 17 additions & 0 deletions py/torch_tensorrt/fx/test/converters/acc_op/test_gelu.py
@@ -35,6 +35,23 @@ def forward(self, x):
TestModule(), input_specs, expected_ops={acc_ops.gelu}
)

def test_gelu_with_dynamic_shape_four_dimensions(self):
class TestModule(nn.Module):
def forward(self, x):
return nn.functional.gelu(x)

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))],
),
]

self.run_test_with_dynamic_shape(
TestModule(), input_specs, expected_ops={acc_ops.gelu}
)


if __name__ == "__main__":
run_tests()
46 changes: 46 additions & 0 deletions py/torch_tensorrt/fx/test/converters/acc_op/test_getitem.py
@@ -148,6 +148,52 @@ def forward(self, x):
Getitem(idx), input_specs, expected_ops={acc_ops.getitem}
)

# Testing with the following parameters results in an error:
# AssertionError: We don't support slicing tensor on dynamic shape.

"""
("ellipsis", (slice(None, None, None), ..., slice(0, -3, 2))),
(
"slice_end_none",
(slice(None, None, None), slice(None, None, None), slice(1, None, 1)),
),
(
"slice_step_none",
(slice(None, None, None), slice(None, None, None), slice(0, 3, None)),
),
"""

@parameterized.expand(
[
("slice_batch_dim", slice(None, None, None)),
(
"slice_all_none",
(slice(None, None, None), slice(None, None, None)),
),
]
)
def test_getitem_with_dynamic_shape_four_dimensions(self, _, idx):
class Getitem(nn.Module):
def __init__(self, idx):
super().__init__()
self.idx = idx

def forward(self, x):
x = x + x
return x[self.idx]

input_specs = [
InputTensorSpec(
shape=(-1, -1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1, 1), (3, 3, 3, 3), (5, 5, 5, 5))],
),
]

self.run_test_with_dynamic_shape(
Getitem(idx), input_specs, expected_ops={acc_ops.getitem}
)


if __name__ == "__main__":
run_tests()
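The parameterized idx values are the desugared forms of ordinary Python indexing, which can be checked directly:

import torch

x = torch.randn(3, 3, 3, 3)
# "slice_batch_dim": slice(None, None, None) is what x[:] desugars to
assert torch.equal(x[slice(None, None, None)], x[:])
# "slice_all_none": a tuple of two full slices is x[:, :]
assert torch.equal(x[(slice(None, None, None), slice(None, None, None))], x[:, :])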
(Diffs for the remaining changed files are not shown.)
