diff --git a/.gitignore b/.gitignore
index 1ced9290a8..05da7d8469 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,4 +56,5 @@ examples/int8/ptq/ptq
examples/int8/qat/qat
examples/int8/training/vgg16/data/*
examples/int8/datasets/data/*
-env/**/*
\ No newline at end of file
+env/**/*
+bazel-Torch-TensorRT-Preview
\ No newline at end of file
diff --git a/README.md b/README.md
index 66a29708da..1de5d977ca 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ More Information / System Architecture:
## Building a docker container for Torch-TensorRT Preview
-We provide a `Dockerfile` in `docker/` directory. We build `Torch-TensorRT` on top of a `Pytorch NGC container` which provide basic dependencies (like CUDA, CUDNN, CUBLAS, TensorRT, Pytorch and others) The dependency libraries in the container can be found in the release notes.
+We provide a `Dockerfile` in the `docker/` directory. We build `Torch-TensorRT` on top of a `PyTorch NGC container` which provides basic dependencies (like CUDA, CUDNN, CUBLAS, TensorRT, PyTorch and others). The dependency libraries in the container can be found in the release notes.
Please follow these instructions to build a Docker container.
@@ -41,7 +41,7 @@ auto compile_settings = torch_tensorrt::ts::CompileSpec({input});
// FP16 execution
compile_settings.enabled_precisions = {torch::kHalf};
// Compile module
-auto trt_mod = torch_tensorrt::ts::CompileModule(ts_mod, compile_settings);
+auto trt_mod = torch_tensorrt::ts::compile(ts_mod, compile_settings);
// Run like normal
auto results = trt_mod.forward({in_tensor});
// Save module for later
diff --git a/core/partitioning/README.md b/core/partitioning/README.md
index 5f9a910df5..f6bb4a5252 100644
--- a/core/partitioning/README.md
+++ b/core/partitioning/README.md
@@ -62,6 +62,6 @@ torchtrt::ts::CompileSpec cfg(input_sizes);
cfg.torch_fallback = torchtrt::CompileSpec::TorchFallback(true);
cfg.torch_fallback.min_block_size = 2;
cfg.torch_fallback.forced_fallback_ops.push_back("aten::relu");
-auto trt_mod = torchtrt::ts::CompileModule(mod, cfg);
+auto trt_mod = torchtrt::ts::compile(mod, cfg);
auto out = trt_mod.forward({in});
```
diff --git a/cpp/bin/torchtrtc/main.cpp b/cpp/bin/torchtrtc/main.cpp
index 6bf5fbd12a..645292ab9b 100644
--- a/cpp/bin/torchtrtc/main.cpp
+++ b/cpp/bin/torchtrtc/main.cpp
@@ -600,7 +600,7 @@ int main(int argc, char** argv) {
// Instead of compiling, just embed engine in a PyTorch module
if (embed_engine) {
std::string serialized_engine = read_buf(real_input_path);
- auto trt_mod = torchtrt::ts::EmbedEngineInNewModule(serialized_engine, compile_settings.device);
+ auto trt_mod = torchtrt::ts::embed_engine_in_new_module(serialized_engine, compile_settings.device);
trt_mod.save(real_output_path);
return 0;
}
@@ -622,12 +622,12 @@ int main(int argc, char** argv) {
}
if (save_engine) {
- auto engine = torchtrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_settings);
+ auto engine = torchtrt::ts::convert_method_to_trt_engine(mod, "forward", compile_settings);
std::ofstream out(real_output_path);
out << engine;
out.close();
} else {
- auto trt_mod = torchtrt::ts::CompileModule(mod, compile_settings);
+ auto trt_mod = torchtrt::ts::compile(mod, compile_settings);
if (!no_threshold_check &&
(compile_settings.enabled_precisions.size() == 1 &&
diff --git a/cpp/include/torch_tensorrt/torch_tensorrt.h b/cpp/include/torch_tensorrt/torch_tensorrt.h
index a60ca58a63..b5be74f0ad 100644
--- a/cpp/include/torch_tensorrt/torch_tensorrt.h
+++ b/cpp/include/torch_tensorrt/torch_tensorrt.h
@@ -701,7 +701,7 @@ struct TORCHTRT_API CompileSpec {
*
* @returns bool: Method is supported by Torch-TensorRT.TorchScript
*/
-TORCHTRT_API bool CheckMethodOperatorSupport(const torch::jit::Module& module, std::string method_name);
+TORCHTRT_API bool check_method_operator_support(const torch::jit::Module& module, std::string method_name);
/**
* @brief Compile a TorchScript module for NVIDIA GPUs using TensorRT
@@ -717,7 +717,7 @@ TORCHTRT_API bool CheckMethodOperatorSupport(const torch::jit::Module& module, s
*
* @return: A new module targeting a TensorRT engine
*/
-TORCHTRT_API torch::jit::Module CompileModule(const torch::jit::Module& module, CompileSpec info);
+TORCHTRT_API torch::jit::Module compile(const torch::jit::Module& module, CompileSpec info);
/**
* @brief Compile a TorchScript method for NVIDIA GPUs using TensorRT
@@ -733,7 +733,7 @@ TORCHTRT_API torch::jit::Module CompileModule(const torch::jit::Module& module,
* @return: std::string: Serialized TensorRT engine equivalent to the method
* graph
*/
-TORCHTRT_API std::string ConvertMethodToTRTEngine(
+TORCHTRT_API std::string convert_method_to_trt_engine(
const torch::jit::Module& module,
std::string method_name,
CompileSpec info);
@@ -751,6 +751,6 @@ TORCHTRT_API std::string ConvertMethodToTRTEngine(
*
* @return: A new module targeting a TensorRT engine
*/
-TORCHTRT_API torch::jit::Module EmbedEngineInNewModule(const std::string& engine, Device device);
+TORCHTRT_API torch::jit::Module embed_engine_in_new_module(const std::string& engine, Device device);
} // namespace torchscript
} // namespace torch_tensorrt
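For orientation, here is a minimal usage sketch (not part of the patch) of how the renamed TorchScript-frontend entry points above fit together after this change; the module path, input shape, and output file name are illustrative placeholders:

```cpp
#include "torch_tensorrt/torch_tensorrt.h"
#include <torch/script.h>

int main() {
  // Load a previously scripted/traced module (path is a placeholder)
  auto mod = torch::jit::load("model.ts");

  // Describe the expected input shape and optionally enable FP16
  auto spec = torch_tensorrt::ts::CompileSpec({{1, 3, 224, 224}});
  spec.enabled_precisions.insert(torch::kHalf);

  // snake_case names replace the old CamelCase API
  if (torch_tensorrt::ts::check_method_operator_support(mod, "forward")) {  // was CheckMethodOperatorSupport
    auto trt_mod = torch_tensorrt::ts::compile(mod, spec);                  // was CompileModule
    trt_mod.save("trt_model.ts");
  }
  return 0;
}
```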
diff --git a/cpp/src/torch_tensorrt.cpp b/cpp/src/torch_tensorrt.cpp
index fc3a8466da..42b44833de 100644
--- a/cpp/src/torch_tensorrt.cpp
+++ b/cpp/src/torch_tensorrt.cpp
@@ -12,11 +12,11 @@ namespace torchscript {
// Defined in compile_spec.cpp
torch_tensorrt::core::CompileSpec to_internal_compile_spec(CompileSpec external);
-bool CheckMethodOperatorSupport(const torch::jit::script::Module& module, std::string method_name) {
+bool check_method_operator_support(const torch::jit::script::Module& module, std::string method_name) {
return torch_tensorrt::core::CheckMethodOperatorSupport(module, method_name);
}
-std::string ConvertMethodToTRTEngine(
+std::string convert_method_to_trt_engine(
const torch::jit::script::Module& module,
std::string method_name,
CompileSpec info) {
@@ -26,14 +26,14 @@ std::string ConvertMethodToTRTEngine(
return torch_tensorrt::core::ConvertGraphToTRTEngine(module, method_name, to_internal_compile_spec(info));
}
-torch::jit::script::Module CompileModule(const torch::jit::script::Module& module, CompileSpec info) {
+torch::jit::script::Module compile(const torch::jit::script::Module& module, CompileSpec info) {
LOG_DEBUG(get_build_info());
// Want to export a much simpler (non TRT header dependent) API so doing the
// type conversion here
return torch_tensorrt::core::CompileGraph(module, to_internal_compile_spec(info));
}
-torch::jit::Module EmbedEngineInNewModule(const std::string& engine, Device device) {
+torch::jit::Module embed_engine_in_new_module(const std::string& engine, Device device) {
return torch_tensorrt::core::EmbedEngineInNewModule(engine, to_internal_cuda_device(device));
}
diff --git a/examples/benchmark/main.cpp b/examples/benchmark/main.cpp
index 56cf0c086c..fbb8b75409 100644
--- a/examples/benchmark/main.cpp
+++ b/examples/benchmark/main.cpp
@@ -127,11 +127,11 @@ int main(int argc, const char* argv[]) {
compile_spec.enabled_precisions.insert(torch::kF16);
#endif
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
#ifdef SAVE_ENGINE
std::cout << "Compiling graph to save as TRT engine (/tmp/engine_converted_from_jit.trt)" << std::endl;
- auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_spec);
+ auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", compile_spec);
std::ofstream out("/tmp/engine_converted_from_jit.trt");
out << engine;
out.close();
diff --git a/examples/int8/ptq/main.cpp b/examples/int8/ptq/main.cpp
index d63462ee51..fed2b2337a 100644
--- a/examples/int8/ptq/main.cpp
+++ b/examples/int8/ptq/main.cpp
@@ -56,14 +56,14 @@ torch::jit::Module compile_int8_model(const std::string& data_dir, torch::jit::M
#ifdef SAVE_ENGINE
std::cout << "Compiling graph to save as TRT engine (/tmp/engine_converted_from_jit.trt)" << std::endl;
- auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_spec);
+ auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", compile_spec);
std::ofstream out("/tmp/int8_engine_converted_from_jit.trt");
out << engine;
out.close();
#endif
std::cout << "Compiling and quantizing module" << std::endl;
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
return std::move(trt_mod);
}
diff --git a/examples/int8/qat/main.cpp b/examples/int8/qat/main.cpp
index 8a2dda0bd5..b1bec3e6a2 100644
--- a/examples/int8/qat/main.cpp
+++ b/examples/int8/qat/main.cpp
@@ -40,14 +40,14 @@ torch::jit::Module compile_int8_qat_model(const std::string& data_dir, torch::ji
#ifdef SAVE_ENGINE
std::cout << "Compiling graph to save as TRT engine (/tmp/engine_converted_from_jit.trt)" << std::endl;
- auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", compile_spec);
+ auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", compile_spec);
std::ofstream out("/tmp/int8_engine_converted_from_jit.trt");
out << engine;
out.close();
#endif
std::cout << "Compiling and quantizing module" << std::endl;
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
return std::move(trt_mod);
}
diff --git a/tests/accuracy/test_dla_fp16_accuracy.cpp b/tests/accuracy/test_dla_fp16_accuracy.cpp
index e6bfbfbff0..43587c407a 100644
--- a/tests/accuracy/test_dla_fp16_accuracy.cpp
+++ b/tests/accuracy/test_dla_fp16_accuracy.cpp
@@ -34,7 +34,7 @@ TEST_P(AccuracyTests, DLAFP16AccuracyIsClose) {
compile_spec.device.allow_gpu_fallback = true;
compile_spec.workspace_size = 1 << 28;
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
torch::Tensor trt_correct = torch::zeros({1}, {torch::kCUDA}), trt_total = torch::zeros({1}, {torch::kCUDA});
for (auto batch : *eval_dataloader) {
diff --git a/tests/accuracy/test_dla_int8_accuracy.cpp b/tests/accuracy/test_dla_int8_accuracy.cpp
index 18ce29cbc6..df371e642d 100644
--- a/tests/accuracy/test_dla_int8_accuracy.cpp
+++ b/tests/accuracy/test_dla_int8_accuracy.cpp
@@ -61,7 +61,7 @@ TEST_P(AccuracyTests, DLAINT8AccuracyIsClose) {
torch::Tensor jit_accuracy = (jit_correct / jit_total) * 100;
// Compile Graph
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
// Check the INT8 accuracy in TRT
torch::Tensor trt_correct = torch::zeros({1}, {torch::kCUDA}), trt_total = torch::zeros({1}, {torch::kCUDA});
diff --git a/tests/accuracy/test_fp16_accuracy.cpp b/tests/accuracy/test_fp16_accuracy.cpp
index c206c22d5a..dd68202312 100644
--- a/tests/accuracy/test_fp16_accuracy.cpp
+++ b/tests/accuracy/test_fp16_accuracy.cpp
@@ -29,7 +29,7 @@ TEST_P(AccuracyTests, FP16AccuracyIsClose) {
auto compile_spec = torch_tensorrt::ts::CompileSpec({input_shape});
compile_spec.enabled_precisions.insert(torch::kF16);
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
torch::Tensor trt_correct = torch::zeros({1}, {torch::kCUDA}), trt_total = torch::zeros({1}, {torch::kCUDA});
for (auto batch : *eval_dataloader) {
diff --git a/tests/accuracy/test_fp32_accuracy.cpp b/tests/accuracy/test_fp32_accuracy.cpp
index bac470fc8c..47051ac05c 100644
--- a/tests/accuracy/test_fp32_accuracy.cpp
+++ b/tests/accuracy/test_fp32_accuracy.cpp
@@ -29,7 +29,7 @@ TEST_P(AccuracyTests, FP32AccuracyIsClose) {
auto compile_spec = torch_tensorrt::ts::CompileSpec({input_shape});
compile_spec.enabled_precisions.insert(torch::kF32);
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
torch::Tensor trt_correct = torch::zeros({1}, {torch::kCUDA}), trt_total = torch::zeros({1}, {torch::kCUDA});
for (auto batch : *eval_dataloader) {
diff --git a/tests/accuracy/test_int8_accuracy.cpp b/tests/accuracy/test_int8_accuracy.cpp
index f602e29171..b32c1b0b16 100644
--- a/tests/accuracy/test_int8_accuracy.cpp
+++ b/tests/accuracy/test_int8_accuracy.cpp
@@ -58,7 +58,7 @@ TEST_P(AccuracyTests, INT8AccuracyIsClose) {
torch::Tensor jit_accuracy = (jit_correct / jit_total) * 100;
// Compile Graph
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_spec);
// Check the INT8 accuracy in TRT
torch::Tensor trt_correct = torch::zeros({1}, {torch::kCUDA}), trt_total = torch::zeros({1}, {torch::kCUDA});
diff --git a/tests/cpp/test_compiled_modules.cpp b/tests/cpp/test_compiled_modules.cpp
index 57243a90c4..dc316df2d8 100644
--- a/tests/cpp/test_compiled_modules.cpp
+++ b/tests/cpp/test_compiled_modules.cpp
@@ -13,7 +13,7 @@ TEST_P(CppAPITests, CompiledModuleIsClose) {
std::vector<at::Tensor> jit_results;
jit_results.push_back(jit_results_ivalues.toTensor());
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, input_shapes);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, input_shapes);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
std::vector<at::Tensor> trt_results;
trt_results.push_back(trt_results_ivalues.toTensor());
diff --git a/tests/cpp/test_default_input_types.cpp b/tests/cpp/test_default_input_types.cpp
index 9cba2facd7..63904c7416 100644
--- a/tests/cpp/test_default_input_types.cpp
+++ b/tests/cpp/test_default_input_types.cpp
@@ -15,7 +15,7 @@ TEST_P(CppAPITests, InputsUseDefaultFP32) {
auto spec = torch_tensorrt::ts::CompileSpec({in});
spec.enabled_precisions.insert(torch_tensorrt::DataType::kHalf);
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, spec);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
std::vector<at::Tensor> trt_results;
trt_results.push_back(trt_results_ivalues.toTensor());
@@ -38,7 +38,7 @@ TEST_P(CppAPITests, InputsUseDefaultFP16) {
mod.to(torch::kHalf);
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, spec);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
std::vector<at::Tensor> trt_results;
trt_results.push_back(trt_results_ivalues.toTensor());
@@ -60,7 +60,7 @@ TEST_P(CppAPITests, InputsUseDefaultFP16WithoutFP16Enabled) {
mod.to(torch::kHalf);
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, spec);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
std::vector<at::Tensor> trt_results;
trt_results.push_back(trt_results_ivalues.toTensor());
@@ -84,7 +84,7 @@ TEST_P(CppAPITests, InputsRespectUserSettingFP16WeightsFP32In) {
mod.to(torch::kHalf);
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, spec);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
std::vector<at::Tensor> trt_results;
trt_results.push_back(trt_results_ivalues.toTensor());
@@ -106,7 +106,7 @@ TEST_P(CppAPITests, InputsRespectUserSettingFP32WeightsFP16In) {
auto spec = torch_tensorrt::ts::CompileSpec({in});
spec.enabled_precisions.insert(torch_tensorrt::DataType::kHalf);
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, spec);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
std::vector<at::Tensor> trt_results;
trt_results.push_back(trt_results_ivalues.toTensor());
diff --git a/tests/cpp/test_example_tensors.cpp b/tests/cpp/test_example_tensors.cpp
index 086c5a6450..fc77d9e4d4 100644
--- a/tests/cpp/test_example_tensors.cpp
+++ b/tests/cpp/test_example_tensors.cpp
@@ -11,7 +11,7 @@ TEST_P(CppAPITests, InputsFromTensors) {
auto spec = torch_tensorrt::ts::CompileSpec({trt_inputs_ivalues[0].toTensor()});
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, spec);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, spec);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, trt_inputs_ivalues);
std::vector<at::Tensor> trt_results;
trt_results.push_back(trt_results_ivalues.toTensor());
diff --git a/tests/cpp/test_module_fallback.cpp b/tests/cpp/test_module_fallback.cpp
index 30a045b56c..7812067048 100644
--- a/tests/cpp/test_module_fallback.cpp
+++ b/tests/cpp/test_module_fallback.cpp
@@ -26,7 +26,7 @@ TEST(CppAPITest, ResNetModuleFallbacksCorrectly) {
cfg.torch_executed_modules.push_back("torchvision.models.resnet.BasicBlock");
auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, cfg);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, cfg);
auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
}
@@ -54,7 +54,7 @@ TEST(CppAPITest, MobileNetModuleFallbacksCorrectlyWithOneEngine) {
cfg.torch_executed_modules.push_back("torchvision.models.mobilenetv2.ConvBNActivation");
auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, cfg);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, cfg);
auto g = trt_mod.get_method("forward").graph();
auto nodes = g->block()->nodes();
diff --git a/tests/cpp/test_modules_as_engines.cpp b/tests/cpp/test_modules_as_engines.cpp
index d4b6270a16..17bd856445 100644
--- a/tests/cpp/test_modules_as_engines.cpp
+++ b/tests/cpp/test_modules_as_engines.cpp
@@ -40,8 +40,8 @@ TEST_P(CppAPITests, ModuleToEngineToModuleIsClose) {
cudaGetDevice(&device_id);
compile_spec.device.device_type = torch_tensorrt::Device::DeviceType::kGPU;
compile_spec.device.gpu_id = device_id;
- auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", input_ranges);
- auto trt_mod = torch_tensorrt::ts::EmbedEngineInNewModule(engine, compile_spec.device);
+ auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", input_ranges);
+ auto trt_mod = torch_tensorrt::ts::embed_engine_in_new_module(engine, compile_spec.device);
torch::jit::IValue trt_results_ivalues = torch_tensorrt::tests::util::RunModuleForward(trt_mod, inputs_ivalues);
std::vector<at::Tensor> trt_results;
diff --git a/tests/cpp/test_multi_gpu_serde.cpp b/tests/cpp/test_multi_gpu_serde.cpp
index 08132c07f1..2356583fa3 100644
--- a/tests/cpp/test_multi_gpu_serde.cpp
+++ b/tests/cpp/test_multi_gpu_serde.cpp
@@ -14,7 +14,7 @@ TEST_P(CppAPITests, CompiledModuleIsClose) {
std::vector<at::Tensor> jit_results;
jit_results.push_back(jit_results_ivalues.toTensor());
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, input_shapes);
+ auto trt_mod = torch_tensorrt::ts::compile(mod, input_shapes);
// Deliberately changing the device ID. torch_tensorrt runtime should correct the Device ID internally
torch_tensorrt::set_device(1);
diff --git a/tests/cpp/test_multiple_registered_engines.cpp b/tests/cpp/test_multiple_registered_engines.cpp
index 30502cd0f4..2788940fe5 100644
--- a/tests/cpp/test_multiple_registered_engines.cpp
+++ b/tests/cpp/test_multiple_registered_engines.cpp
@@ -41,13 +41,13 @@ TEST(CppAPITest, CanRunMultipleEngines) {
std::vector<at::Tensor> jit2_results;
jit2_results.push_back(jit2_results_ivalues.toTensor());
- auto trt_mod1 = torch_tensorrt::ts::CompileModule(mod1, input_shapes);
+ auto trt_mod1 = torch_tensorrt::ts::compile(mod1, input_shapes);
torch::jit::IValue trt1_results_ivalues =
torch_tensorrt::tests::util::RunModuleForward(trt_mod1, trt1_inputs_ivalues);
std::vector<at::Tensor> trt1_results;
trt1_results.push_back(trt1_results_ivalues.toTensor());
- auto trt_mod2 = torch_tensorrt::ts::CompileModule(mod2, input_shapes);
+ auto trt_mod2 = torch_tensorrt::ts::compile(mod2, input_shapes);
torch::jit::IValue trt2_results_ivalues =
torch_tensorrt::tests::util::RunModuleForward(trt_mod2, trt2_inputs_ivalues);
std::vector<at::Tensor> trt2_results;
diff --git a/tests/cpp/test_runtime_thread_safety.cpp b/tests/cpp/test_runtime_thread_safety.cpp
index 4f23a87f5b..8f09838904 100644
--- a/tests/cpp/test_runtime_thread_safety.cpp
+++ b/tests/cpp/test_runtime_thread_safety.cpp
@@ -52,8 +52,8 @@ TEST(CppAPITests, RuntimeThreadSafety) {
// FP32 execution
compile_settings.enabled_precisions = {torch::kFloat};
compile_settings.strict_types = true;
- auto trt_mod = torch_tensorrt::ts::CompileModule(mod, compile_settings);
- std::cout << "torch_tensorrt::ts::CompileModule" << std::endl;
+ auto trt_mod = torch_tensorrt::ts::compile(mod, compile_settings);
+ std::cout << "torch_tensorrt::ts::compile" << std::endl;
int num_threads = 10;
std::vector out_vec(num_threads), trt_out_vec(num_threads);
diff --git a/tests/cpp/test_serialization.cpp b/tests/cpp/test_serialization.cpp
index f45a0ce604..877e42e6ab 100644
--- a/tests/cpp/test_serialization.cpp
+++ b/tests/cpp/test_serialization.cpp
@@ -27,7 +27,7 @@ TEST_P(CppAPITests, SerializedModuleIsStillCorrect) {
pre_serialized_inputs_ivalues.push_back(in.clone());
}
- auto pre_serialized_mod = torch_tensorrt::ts::CompileModule(mod, input_shapes);
+ auto pre_serialized_mod = torch_tensorrt::ts::compile(mod, input_shapes);
torch::jit::IValue pre_serialized_results_ivalues =
torch_tensorrt::tests::util::RunModuleForward(pre_serialized_mod, pre_serialized_inputs_ivalues);
std::vector<at::Tensor> pre_serialized_results;
@@ -57,7 +57,7 @@ TEST_P(CppAPITests, SerializedDynamicModuleIsStillCorrect) {
}
auto pre_serialized_mod =
- torch_tensorrt::ts::CompileModule(mod, torch_tensorrt::ts::CompileSpec(toInputRangesDynamic(input_shapes)));
+ torch_tensorrt::ts::compile(mod, torch_tensorrt::ts::CompileSpec(toInputRangesDynamic(input_shapes)));
torch::jit::IValue pre_serialized_results_ivalues =
torch_tensorrt::tests::util::RunModuleForward(pre_serialized_mod, pre_serialized_inputs_ivalues);
std::vector<at::Tensor> pre_serialized_results;
diff --git a/tests/util/run_forward.cpp b/tests/util/run_forward.cpp
index 82151796c9..b074f2c213 100644
--- a/tests/util/run_forward.cpp
+++ b/tests/util/run_forward.cpp
@@ -18,7 +18,7 @@ std::vector RunModuleForwardAsEngine(torch::jit::Module& mod, std::v
input_ranges.push_back(in.sizes());
}
- auto engine = torch_tensorrt::ts::ConvertMethodToTRTEngine(mod, "forward", input_ranges);
+ auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", input_ranges);
return RunEngine(engine, inputs);
}
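As a complementary sketch (also not part of the patch), this is the engine round trip that `torchtrtc` and the tests above perform, expressed with the renamed helpers: serialize a method to a raw TensorRT engine, write it to disk, then wrap the bytes back into a runnable TorchScript module. The file names, input shape, and `spec` setup are illustrative assumptions:

```cpp
#include "torch_tensorrt/torch_tensorrt.h"
#include <torch/script.h>
#include <fstream>
#include <sstream>

int main() {
  auto mod = torch::jit::load("model.ts");  // placeholder path
  auto spec = torch_tensorrt::ts::CompileSpec({{1, 3, 224, 224}});

  // Serialize the "forward" method as a standalone TensorRT engine (was ConvertMethodToTRTEngine)
  auto engine = torch_tensorrt::ts::convert_method_to_trt_engine(mod, "forward", spec);
  std::ofstream out("model.trt", std::ios::binary);
  out << engine;
  out.close();

  // Later: read the serialized engine back and embed it in a new module (was EmbedEngineInNewModule)
  std::ifstream in("model.trt", std::ios::binary);
  std::stringstream buf;
  buf << in.rdbuf();
  auto trt_mod = torch_tensorrt::ts::embed_engine_in_new_module(buf.str(), spec.device);

  // Run it like any other TorchScript module
  auto results = trt_mod.forward({torch::randn({1, 3, 224, 224}, {torch::kCUDA})});
  return 0;
}
```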