diff --git a/.circleci/config.yml b/.circleci/config.yml
index 77a1fd036f..3eda95d4f0 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -674,7 +674,7 @@ workflows:
           requires:
             - build-x86_64-pyt-release

-      - test-py-ts-x86_64:
+      - test-py-fx-x86_64:
           name: test-py-fx-x86_64-pyt-release
           channel: "release"
           torch-build: << pipeline.parameters.torch-release-build >>
@@ -752,7 +752,7 @@ workflows:
           requires:
             - build-x86_64-pyt-release

-      - test-py-ts-x86_64:
+      - test-py-fx-x86_64:
           name: test-py-fx-x86_64-pyt-release
           channel: "release"
           torch-build: << pipeline.parameters.torch-release-build >>
diff --git a/cpp/include/torch_tensorrt/torch_tensorrt.h b/cpp/include/torch_tensorrt/torch_tensorrt.h
index 70dea51bc7..11dc5d74c6 100644
--- a/cpp/include/torch_tensorrt/torch_tensorrt.h
+++ b/cpp/include/torch_tensorrt/torch_tensorrt.h
@@ -569,7 +569,7 @@ struct TORCHTRT_API CompileSpec {
   CompileSpec(std::vector<std::vector<int64_t>> fixed_sizes);

   /**
-   * @brief Construct a new Extra Info object
+   * @brief Construct a new Compile Spec object
    * Convienence constructor to set fixed input size from c10::ArrayRef's (the
    * output of tensor.sizes()) describing size of input tensors. Each entry in
    * the vector represents a input and should be provided in call order.
@@ -583,7 +583,7 @@ struct TORCHTRT_API CompileSpec {
   CompileSpec(std::vector<c10::ArrayRef<int64_t>> fixed_sizes);

   /**
-   * @brief Construct a new Extra Info object from input ranges.
+   * @brief Construct a new Compile Spec object from input ranges.
    * Each entry in the vector represents a input and should be provided in call
    * order.
    *
@@ -594,8 +594,7 @@ struct TORCHTRT_API CompileSpec {
   CompileSpec(std::vector<Input> inputs);

   /**
-   * @brief Construct a new Extra Info object from IValue.
-   * The IValue store a complex Input
+   * @brief Construct a new Compile Spec object from IValue which represents the nesting of input tensors for a module.
    *
    * @param input_signature
    */
diff --git a/tests/cpp/BUILD b/tests/cpp/BUILD
index 2d545dc8f1..8e479e2e0a 100644
--- a/tests/cpp/BUILD
+++ b/tests/cpp/BUILD
@@ -19,7 +19,7 @@ test_suite(
         ":test_serialization",
         ":test_module_fallback",
         ":test_example_tensors",
-        ":test_collection"
+        ":test_collections"
     ],
 )

@@ -34,7 +34,7 @@ test_suite(
         ":test_serialization",
         ":test_module_fallback",
         ":test_example_tensors",
-        ":test_collection"
+        ":test_collections"
     ],
 )

@@ -125,8 +125,8 @@ cc_test(
 )

 cc_test(
-    name = "test_collection",
-    srcs = ["test_collection.cpp"],
+    name = "test_collections",
+    srcs = ["test_collections.cpp"],
     data = [
         "//tests/modules:jit_models",
     ],
diff --git a/tests/cpp/test_collection.cpp b/tests/cpp/test_collections.cpp
similarity index 96%
rename from tests/cpp/test_collection.cpp
rename to tests/cpp/test_collections.cpp
index c269ebac17..df2280b947 100644
--- a/tests/cpp/test_collection.cpp
+++ b/tests/cpp/test_collections.cpp
@@ -8,7 +8,7 @@

 TEST(CppAPITests, TestCollectionStandardTensorInput) {

-  std::string path = "tests/modules/standard_tensor_input.jit.pt";
+  std::string path = "tests/modules/standard_tensor_input_scripted.jit.pt";
   torch::Tensor in0 = torch::randn({1, 3, 512, 512}, torch::kCUDA).to(torch::kHalf);
   std::vector<at::Tensor> inputs;
   inputs.push_back(in0);
@@ -53,7 +53,7 @@ TEST(CppAPITests, TestCollectionStandardTensorInput) {

 TEST(CppAPITests, TestCollectionTupleInput) {

-  std::string path = "tests/modules/tuple_input.jit.pt";
+  std::string path = "tests/modules/tuple_input_scripted.jit.pt";
   torch::Tensor in0 = torch::randn({1, 3, 512, 512}, torch::kCUDA).to(torch::kHalf);

   torch::jit::Module mod;
@@ -103,7 +103,7 @@ TEST(CppAPITests, TestCollectionTupleInput) {

 TEST(CppAPITests, TestCollectionListInput) {

-  std::string path = "tests/modules/list_input.jit.pt";
+  std::string path = "tests/modules/list_input_scripted.jit.pt";
   torch::Tensor in0 = torch::randn({1, 3, 512, 512}, torch::kCUDA).to(torch::kHalf);
   std::vector<at::Tensor> inputs;
   inputs.push_back(in0);
@@ -169,7 +169,7 @@ TEST(CppAPITests, TestCollectionListInput) {

 TEST(CppAPITests, TestCollectionTupleInputOutput) {

-  std::string path = "tests/modules/tuple_input_output.jit.pt";
+  std::string path = "tests/modules/tuple_input_output_scripted.jit.pt";

   torch::Tensor in0 = torch::randn({1, 3, 512, 512}, torch::kCUDA).to(torch::kHalf);

@@ -224,7 +224,7 @@ TEST(CppAPITests, TestCollectionTupleInputOutput) {

 TEST(CppAPITests, TestCollectionListInputOutput) {

-  std::string path = "tests/modules/list_input_output.jit.pt";
+  std::string path = "tests/modules/list_input_output_scripted.jit.pt";
   torch::Tensor in0 = torch::randn({1, 3, 512, 512}, torch::kCUDA).to(torch::kHalf);
   std::vector<at::Tensor> inputs;
   inputs.push_back(in0);
@@ -295,7 +295,7 @@ TEST(CppAPITests, TestCollectionListInputOutput) {

 TEST(CppAPITests, TestCollectionComplexModel) {

-  std::string path = "tests/modules/complex_model.jit.pt";
+  std::string path = "tests/modules/list_input_tuple_output_scripted.jit.pt";
   torch::Tensor in0 = torch::randn({1, 3, 512, 512}, torch::kCUDA).to(torch::kHalf);
   std::vector<at::Tensor> inputs;
   inputs.push_back(in0);
diff --git a/tests/cpp/test_example_tensors.cpp b/tests/cpp/test_example_tensors.cpp
index 6561cd16a0..3ec8831f9d 100644
--- a/tests/cpp/test_example_tensors.cpp
+++ b/tests/cpp/test_example_tensors.cpp
@@ -9,7 +9,9 @@ TEST_P(CppAPITests, InputsFromTensors) {
     trt_inputs_ivalues.push_back(in.clone());
   }

-  auto spec = torch_tensorrt::ts::CompileSpec({trt_inputs_ivalues[0].toTensor()});
+
+  auto inputs = std::vector<torch_tensorrt::Input>{trt_inputs_ivalues[0].toTensor()};
+  auto spec = torch_tensorrt::ts::CompileSpec(inputs);

   auto trt_mod = torch_tensorrt::ts::compile(mod, spec);
   torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
diff --git a/tests/modules/hub.py b/tests/modules/hub.py
index 7d3e03e395..3ad92ff79a 100644
--- a/tests/modules/hub.py
+++ b/tests/modules/hub.py
@@ -128,10 +128,10 @@
         "model": cm.ListInputTupleOutput(),
         "path": "script"
     },
-    "bert_base_uncased": {
-        "model": cm.BertModule(),
-        "path": "trace"
-    }
+    #"bert_base_uncased": {
+    #    "model": cm.BertModule(),
+    #    "path": "trace"
+    #}
 }