From 7c7c23ceb673d08a3f8a552cead452b6b79edffb Mon Sep 17 00:00:00 2001
From: TRTorch Github Bot
Date: Fri, 23 Oct 2020 23:08:52 +0000
Subject: [PATCH] docs: [Automated] Regenerating documentation from

Signed-off-by: TRTorch Github Bot
---
 ...asstrtorch_1_1CompileSpec_1_1DataType.html |  20 +-
 ...strtorch_1_1CompileSpec_1_1DeviceType.html |  18 +-
 ...trtorch_1_1ptq_1_1Int8CacheCalibrator.html |   2 +-
 ...classtrtorch_1_1ptq_1_1Int8Calibrator.html |   2 +-
 ...ile_cpp_api_include_trtorch_logging.h.html |   2 +-
 ...file_cpp_api_include_trtorch_macros.h.html |   2 +-
 .../file_cpp_api_include_trtorch_ptq.h.html   |   2 +-
 ...ile_cpp_api_include_trtorch_trtorch.h.html |  10 +-
 ...8h_1a3eace458ae9122f571fabfc9ef1b9e3a.html |   2 +-
 ...8h_1af19cb866b0520fc84b69a1cf25a52b65.html |   5 +-
 docs/_cpp_api/namespace_trtorch.html          |  10 +-
 docs/_cpp_api/namespace_trtorch__logging.html |   2 +-
 docs/_cpp_api/namespace_trtorch__ptq.html     |   2 +-
 .../structtrtorch_1_1CompileSpec.html         |  50 ++-
 ...ttrtorch_1_1CompileSpec_1_1InputRange.html |   2 +-
 docs/_notebooks/Resnet50-example.html         | 334 +++++++-------
 docs/_notebooks/lenet-getting-started.html    | 286 ++++++------
 .../_notebooks/ssd-object-detection-demo.html | 320 +++++++-------
 docs/genindex.html                            | 408 ++----------------
 docs/objects.inv                              | Bin 19356 -> 17157 bytes
 docs/searchindex.js                           |   2 +-
 docs/sitemap.xml                              |   2 +-
 22 files changed, 594 insertions(+), 889 deletions(-)

diff --git a/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DataType.html b/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DataType.html
index 9ac5a87494..b560010f36 100644
--- a/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DataType.html
+++ b/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DataType.html
@@ -392,7 +392,7 @@

This class is a nested type of - + Struct CompileSpec @@ -458,7 +458,7 @@

Underlying enum class to support the - + DataType @@ -467,7 +467,7 @@

In the case that you need to use the - + DataType @@ -476,7 +476,7 @@

ex. trtorch::DataType type = - + DataType::kFloat @@ -612,7 +612,7 @@
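The enum-backed usage described above can be summarized in a minimal C++ sketch. This is an illustration only: the header path comes from this API reference, while the nested spelling trtorch::CompileSpec::DataType (the subject of this page; the inline example above writes trtorch::DataType) is an assumption and has not been verified against a specific TRTorch release.

#include "trtorch/trtorch.h"

// Sketch only. Assumes DataType is the nested class documented on this page
// and that the kFloat enumerator and the comparison operators listed below
// behave as described.
void data_type_sketch() {
  trtorch::CompileSpec::DataType type = trtorch::CompileSpec::DataType::kFloat;
  if (type == trtorch::CompileSpec::DataType::kFloat) {
    // FP32 selected; other enumerators would be handled the same way.
  }
}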

- + DataType @@ -696,7 +696,7 @@

Get the enum value of the - + DataType @@ -775,7 +775,7 @@

Comparison operator for - + DataType @@ -865,7 +865,7 @@

Comparison operator for - + DataType @@ -951,7 +951,7 @@

Comparison operator for - + DataType @@ -1041,7 +1041,7 @@

Comparison operator for - + DataType
diff --git a/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DeviceType.html b/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DeviceType.html
index acb0528810..1d1772a40c 100644
--- a/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DeviceType.html
+++ b/docs/_cpp_api/classtrtorch_1_1CompileSpec_1_1DeviceType.html
@@ -392,7 +392,7 @@

This class is a nested type of - + Struct CompileSpec @@ -435,7 +435,7 @@

This class is compatible with c10::DeviceTypes (but will check for TRT support); the only applicable value is at::kCUDA, which maps to - + DeviceType::kGPU

To use the - + DeviceType

ex. trtorch::DeviceType type = - + DeviceType::kGPU @@ -481,7 +481,7 @@

Underlying enum class to support the - + DeviceType @@ -490,7 +490,7 @@

In the case that you need to use the - + DeviceType @@ -499,7 +499,7 @@

ex. trtorch::DeviceType type = - + DeviceType::kGPU @@ -766,7 +766,7 @@
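A similar minimal sketch for DeviceType, again assuming the nested spelling trtorch::CompileSpec::DeviceType; the converting constructor from a c10 device type is inferred from the compatibility note above and should be treated as an assumption rather than a documented signature.

#include <ATen/ATen.h>
#include "trtorch/trtorch.h"

// Sketch only. Per the note above, at::kCUDA is expected to map to
// DeviceType::kGPU, the only TensorRT-supported target; the constructor
// taking a c10 device type is assumed, not verified.
void device_type_sketch() {
  trtorch::CompileSpec::DeviceType from_enum = trtorch::CompileSpec::DeviceType::kGPU;
  trtorch::CompileSpec::DeviceType from_c10(at::kCUDA);
  if (from_enum == from_c10) {
    // Both spellings select the GPU.
  }
}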

Comparison operator for - + DeviceType @@ -852,7 +852,7 @@

Comparison operator for - + DeviceType
diff --git a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html
index f043962260..d13bc517fd 100644
--- a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html
+++ b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8CacheCalibrator.html
@@ -865,7 +865,7 @@

Convenience function to convert to an IInt8Calibrator* so it can easily be assigned to the ptq_calibrator field in - + CompileSpec
diff --git a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html
index 797c91a841..552fbd4d3d 100644
--- a/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html
+++ b/docs/_cpp_api/classtrtorch_1_1ptq_1_1Int8Calibrator.html
@@ -926,7 +926,7 @@

Convenience function to convert to an IInt8Calibrator* so it can easily be assigned to the ptq_calibrator field in - + CompileSpec
diff --git a/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html b/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html
index 28c3b28c94..e54f3aab7c 100644
--- a/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html
+++ b/docs/_cpp_api/file_cpp_api_include_trtorch_logging.h.html
@@ -407,7 +407,7 @@
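Both calibrator classes described above exist to be dropped into the ptq_calibrator field of a CompileSpec via that conversion. A rough end-to-end sketch follows; trtorch::ptq::make_int8_cache_calibrator, the shape-list CompileSpec constructor, the op_precision field, and trtorch::CompileGraph are drawn from the wider TRTorch documentation rather than this page, so treat the exact names and signatures as assumptions.

#include <torch/script.h>
#include "trtorch/ptq.h"
#include "trtorch/trtorch.h"

// Illustrative INT8 post-training-quantization sketch using a cached
// calibration table. File names and input shapes are placeholders.
void ptq_sketch() {
  torch::jit::Module mod = torch::jit::load("trained_model.ts");

  // The calibrator converts implicitly to the nvinfer1::IInt8Calibrator*
  // expected by CompileSpec::ptq_calibrator, per the description above.
  auto calibrator = trtorch::ptq::make_int8_cache_calibrator("calibration.cache");

  trtorch::CompileSpec spec({{1, 3, 224, 224}});
  spec.op_precision = at::kChar;     // assumed to select INT8 kernels; requires the calibrator
  spec.ptq_calibrator = calibrator;

  auto trt_mod = trtorch::CompileGraph(mod, spec);
  trt_mod.save("trt_quantized_model.ts");
}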

)

-

+

Contents

-

- ## 3. Creating TorchScript modules -

+
+
+

+ ## 3. Creating TorchScript modules +

+
+

To compile with TRTorch, the model must first be in TorchScript. @@ -1591,8 +1603,8 @@

-model = resnet50_model.eval().to("cuda")
-traced_model = torch.jit.trace(model, [torch.randn((128, 3, 224, 224)).to("cuda")])
+model = resnet50_model.eval().to("cuda")
+traced_model = torch.jit.trace(model, [torch.randn((128, 3, 224, 224)).to("cuda")])
 
@@ -1610,8 +1622,8 @@

-# This is just an example, and not required for the purposes of this demo
-torch.jit.save(traced_model, "resnet_50_traced.jit.pt")
+# This is just an example, and not required for the purposes of this demo
+torch.jit.save(traced_model, "resnet_50_traced.jit.pt")
 
@@ -1626,8 +1638,8 @@

-# Obtain the average time taken by a batch of input
-benchmark(traced_model, input_shape=(128, 3, 224, 224), nruns=1000)
+# Obtain the average time taken by a batch of input
+benchmark(traced_model, input_shape=(128, 3, 224, 224), nruns=1000)
 
@@ -1657,9 +1669,13 @@

-

- ## 4. Compiling with TRTorch -

+
+
+

+ ## 4. Compiling with TRTorch +

+
+

TorchScript modules behave just like normal PyTorch modules and are interoperable with them. From TorchScript we can now compile a TensorRT-based module. This module will still be implemented in TorchScript, but all of the computation will be done in TensorRT.

@@ -1685,15 +1701,15 @@

-import trtorch
+import trtorch
 
-# The compiled module will have precision as specified by "op_precision".
-# Here, it will have FP32 precision.
-trt_model_fp32 = trtorch.compile(traced_model, {
-    "input_shapes": [(128, 3, 224, 224)],
-    "op_precision": torch.float32, # Run with FP32
-    "workspace_size": 1 << 20
-})
+# The compiled module will have precision as specified by "op_precision".
+# Here, it will have FP32 precision.
+trt_model_fp32 = trtorch.compile(traced_model, {
+    "input_shapes": [(128, 3, 224, 224)],
+    "op_precision": torch.float32, # Run with FP32
+    "workspace_size": 1 << 20
+})
 
 
 
@@ -1710,8 +1726,8 @@

-# Obtain the average time taken by a batch of input
-benchmark(trt_model_fp32, input_shape=(128, 3, 224, 224), nruns=1000)
+# Obtain the average time taken by a batch of input
+benchmark(trt_model_fp32, input_shape=(128, 3, 224, 224), nruns=1000)
 
@@ -1757,15 +1773,15 @@

-import trtorch
+import trtorch
 
-# The compiled module will have precision as specified by "op_precision".
-# Here, it will have FP16 precision.
-trt_model = trtorch.compile(traced_model, {
-    "input_shapes": [(128, 3, 224, 224)],
-    "op_precision": torch.half, # Run with FP16
-    "workspace_size": 1 << 20
-})
+# The compiled module will have precision as specified by "op_precision".
+# Here, it will have FP16 precision.
+trt_model = trtorch.compile(traced_model, {
+    "input_shapes": [(128, 3, 224, 224)],
+    "op_precision": torch.half, # Run with FP16
+    "workspace_size": 1 << 20
+})
 
 
@@ -1781,8 +1797,8 @@

-# Obtain the average time taken by a batch of input
-benchmark(trt_model, input_shape=(128, 3, 224, 224), dtype='fp16', nruns=1000)
+# Obtain the average time taken by a batch of input
+benchmark(trt_model, input_shape=(128, 3, 224, 224), dtype='fp16', nruns=1000)
 
@@ -1812,9 +1828,13 @@

-

- ## 5. Conclusion -

+
+
+

+ ## 5. Conclusion +

+
+

In this notebook, we have walked through the complete process of compiling TorchScript models with TRTorch for the ResNet-50 model and tested the performance impact of the optimization. With TRTorch, we observe a speedup of @@ -1826,9 +1846,9 @@

with FP16.

-

+

What’s next - +

diff --git a/docs/_notebooks/lenet-getting-started.html b/docs/_notebooks/lenet-getting-started.html
index 034ee524aa..99c0d6f8f1 100644
--- a/docs/_notebooks/lenet-getting-started.html
+++ b/docs/_notebooks/lenet-getting-started.html
@@ -326,7 +326,7 @@