Skip to content

Commit

Permalink
fix(): fixed FP16 bug, fixed README, addressed some other PR comments
Browse files Browse the repository at this point in the history
Signed-off-by: Abhiram Iyer <abhirami@nvidia.com>

Signed-off-by: Abhiram Iyer <abhi.iyer.ai@gmail.com>
  • Loading branch information
abhi-iyer committed Jun 19, 2020
1 parent 7794c78 commit d9c0e84
Show file tree
Hide file tree
Showing 3 changed files with 12 additions and 20 deletions.
2 changes: 0 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -205,8 +205,6 @@ You can register a converter for your op using the `NodeConverterRegistry` insid

## Known Limitations

- You cannot use both Adaptive Pooling in PyTorch and also use TRTorch Dynamic input shape (follow [#49](https://github.com/NVIDIA/TRTorch/issues/49) for the latest on the issue)

## Structure of the repo

| Component | Description |
Expand Down
2 changes: 0 additions & 2 deletions core/conversion/converters/impl/interpolate.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,6 @@
#include "NvInfer.h"
#include "NvInferRuntimeCommon.h"

#include <tuple>

namespace trtorch {
namespace core {
namespace conversion {
Expand Down
28 changes: 12 additions & 16 deletions core/conversion/converters/impl/plugins/interpolate_plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -134,15 +134,20 @@ size_t InterpolatePlugin::getSerializationSize() const {
}

bool InterpolatePlugin::supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs, int nbOutputs) {
  // Reports which type/format combinations this plugin supports at each
  // connection position. The plugin is wired with exactly two connections:
  // pos 0 is the single input tensor, pos 1 is the single output tensor.
  TRTORCH_ASSERT(0 <= pos && pos <= 1, "There should be exactly 2 connections to the plugin - 1 input, 1 output");
  TRTORCH_ASSERT(nbInputs == 1, "Expected a single tensor as input to interpolate plugin");
  TRTORCH_ASSERT(nbOutputs == 1, "Expected a single tensor as output to interpolate plugin");

  const PluginTensorDesc& in = inOut[0];

  if (pos == 0) {
    // Input tensor: only FP32 in linear (non-vectorized) layout is supported.
    // Restricting to kFLOAT here (rather than merely excluding INT32/INT8)
    // is what keeps unsupported half-precision combinations from being
    // selected for this plugin.
    return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR);
  }

  // pos == 1, accessing information about output tensor:
  // the output must have the same type and format as the input.
  const PluginTensorDesc& out = inOut[1];

  return (in.type == out.type) && (in.format == out.format);
}

void InterpolatePlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs, const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) {
Expand All @@ -156,16 +161,7 @@ size_t InterpolatePlugin::getWorkspaceSize(const nvinfer1::PluginTensorDesc* inp
int InterpolatePlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void *const *inputs,
void *const *outputs, void *workspace,
cudaStream_t stream) {
at::Tensor input;

if (mode == "adaptive_pool2d") {
// use dynamically inferred input shape (for pooling)
input = at::from_blob((void*) inputs[0], util::toVec(inputDesc->dims), [](void*){}, tensor_options);
} else {
// use precomputed input shape (for interpolation/upsampling)
input = at::from_blob((void*) inputs[0], in_shape, [](void*){}, tensor_options);
}

at::Tensor input = at::from_blob((void*) inputs[0], util::toVec(inputDesc->dims), [](void*){}, tensor_options);
at::Tensor output = at::from_blob(outputs[0], out_shape, [](void*){}, tensor_options);

at::cuda::CUDAStream torch_stream = at::cuda::getStreamFromPool();
Expand Down

0 comments on commit d9c0e84

Please sign in to comment.