diff --git a/README.md b/README.md
index 3db7e1035f..5a2baf5d07 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 # Torch-TensorRT
 
 [![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/)
+[![CircleCI](https://circleci.com/gh/pytorch/TensorRT.svg?style=svg)](https://app.circleci.com/pipelines/github/pytorch/TensorRT)
 
 > Ahead of Time (AOT) compiling for PyTorch JIT and FX
 
diff --git a/core/BUILD b/core/BUILD
index 599a965733..d802a8eff6 100644
--- a/core/BUILD
+++ b/core/BUILD
@@ -33,8 +33,8 @@ cc_library(
         "//core/util/logging",
         "@tensorrt//:nvinfer",
     ] + select({
-        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch", "@libtorch_pre_cxx11_abi//:c10_cuda"],
-        "//conditions:default": ["@libtorch//:libtorch", "@libtorch//:c10_cuda"],
+        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+        "//conditions:default": ["@libtorch//:libtorch"],
     }),
     alwayslink = True,
 )
diff --git a/core/conversion/converters/impl/einsum.cpp b/core/conversion/converters/impl/einsum.cpp
index fdaafe4e33..2fa27716d0 100644
--- a/core/conversion/converters/impl/einsum.cpp
+++ b/core/conversion/converters/impl/einsum.cpp
@@ -12,7 +12,7 @@ namespace impl {
 namespace {
 
 auto einsum_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(
-    {"aten::einsum(str equation, Tensor[] tensors) -> (Tensor)",
+    {"aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> (Tensor)",
      [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
        // Extract equation and list of tensors
        auto equation = args[0].unwrapToString();
diff --git a/tests/core/conversion/converters/test_einsum.cpp b/tests/core/conversion/converters/test_einsum.cpp
index ff7ba201ff..d45dea54fb 100644
--- a/tests/core/conversion/converters/test_einsum.cpp
+++ b/tests/core/conversion/converters/test_einsum.cpp
@@ -9,7 +9,8 @@ TEST(Converters, ATenEinsumConvertsMatMulCorrectly) {
       graph(%x.1 : Tensor, %x.2 : Tensor):
         %0 : str = prim::Constant[value="ij,jk->ik"]()
         %3 : Tensor[] = prim::ListConstruct(%x.1, %x.2)
-        %4 : Tensor = aten::einsum(%0, %3)
+        %none : NoneType = prim::Constant()
+        %4 : Tensor = aten::einsum(%0, %3, %none)
         return (%4))IR";
 
   auto g = std::make_shared<torch::jit::Graph>();
@@ -34,7 +35,8 @@ TEST(Converters, ATenEinsumConvertsElementwiseProdCorrectly) {
       graph(%x.1 : Tensor, %x.2 : Tensor):
         %0 : str = prim::Constant[value="abcd,abcd->abcd"]()
         %3 : Tensor[] = prim::ListConstruct(%x.1, %x.2)
-        %4 : Tensor = aten::einsum(%0, %3)
+        %none : NoneType = prim::Constant()
+        %4 : Tensor = aten::einsum(%0, %3, %none)
         return (%4))IR";
 
   auto g = std::make_shared<torch::jit::Graph>();
@@ -59,7 +61,8 @@ TEST(Converters, ATenEinsumConvertsTransposeCorrectly) {
       graph(%x.1 : Tensor):
         %0 : str = prim::Constant[value="jk->kj"]()
         %3 : Tensor[] = prim::ListConstruct(%x.1)
-        %4 : Tensor = aten::einsum(%0, %3)
+        %none : NoneType = prim::Constant()
+        %4 : Tensor = aten::einsum(%0, %3, %none)
         return (%4))IR";
 
   auto g = std::make_shared<torch::jit::Graph>();
@@ -83,7 +86,8 @@ TEST(Converters, ATenEinsumConvertsVectorsCorrectly) {
       graph(%x.1 : Tensor, %x.2 : Tensor):
         %0 : str = prim::Constant[value="a,b->ab"]()
         %3 : Tensor[] = prim::ListConstruct(%x.1, %x.2)
-        %4 : Tensor = aten::einsum(%0, %3)
+        %none : NoneType = prim::Constant()
+        %4 : Tensor = aten::einsum(%0, %3, %none)
         return (%4))IR";
 
   auto g = std::make_shared<torch::jit::Graph>();
diff --git a/tests/cpp/test_serialization.cpp b/tests/cpp/test_serialization.cpp
index fea4e467f0..ba7e38194f 100644
--- a/tests/cpp/test_serialization.cpp
+++ b/tests/cpp/test_serialization.cpp
@@ -15,7 +15,7 @@ std::vector toInputRangesDynamic(std::vector