[FX] disable 2 of conv3d and type_as tests (#1224)
* Update test_convolution.py

* Update test_type_as.py

* Update config.yml

* Update test_convolution.py

* c++ format fix

* manual fix format for one file
Wei authored Aug 5, 2022
1 parent 84ffb67 commit ec2cdfb
Showing 13 changed files with 174 additions and 161 deletions.
2 changes: 1 addition & 1 deletion .circleci/config.yml
@@ -747,7 +747,7 @@ parameters:
# Nightly platform config
torch-nightly-build:
type: string
default: "1.13.0.dev20220715+cu113"
default: "1.13.0.dev20220731+cu113"
torch-nightly-build-index:
type: string
default: "https://download.pytorch.org/whl/nightly/cu113"
8 changes: 4 additions & 4 deletions core/conversion/conversionctx/ConversionCtx.cpp
@@ -107,7 +107,7 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
}

cfg->setAvgTimingIterations(settings.num_avg_timing_iters);
-if (settings.workspace_size != 0){
+if (settings.workspace_size != 0) {
cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, settings.workspace_size);
}

@@ -124,13 +124,13 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
settings.enabled_precisions.find(nvinfer1::DataType::kFLOAT) == settings.enabled_precisions.end(),
"DLA supports only fp16 or int8 precision");
cfg->setDLACore(settings.device.dla_core);
-if (settings.dla_sram_size != 1048576){
+if (settings.dla_sram_size != 1048576) {
cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_MANAGED_SRAM, settings.dla_sram_size);
}
-if (settings.dla_local_dram_size != 1073741824){
+if (settings.dla_local_dram_size != 1073741824) {
cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_LOCAL_DRAM, settings.dla_local_dram_size);
}
-if (settings.dla_global_dram_size != 536870912){
+if (settings.dla_global_dram_size != 536870912) {
cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_GLOBAL_DRAM, settings.dla_global_dram_size);
}
}
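The hunks above touch the guards that forward memory pool sizes to TensorRT only when they differ from fixed sentinel values (presumably the option defaults); the literals are byte counts: 1048576 = 1 MiB managed SRAM, 1073741824 = 1 GiB local DRAM, 536870912 = 512 MiB global DRAM. Below is a minimal standalone sketch of that guard pattern against the public nvinfer1::IBuilderConfig::setMemoryPoolLimit API; the PoolSettings struct, the constant names, and apply_pool_limits are illustrative placeholders, not Torch-TensorRT's BuilderSettings.

#include <cstdint>
#include <NvInfer.h>

// Illustrative defaults mirroring the literals in the guards above.
constexpr uint64_t kDefaultDlaSramBytes = 1ULL << 20;       // 1048576 bytes = 1 MiB
constexpr uint64_t kDefaultDlaLocalDramBytes = 1ULL << 30;  // 1073741824 bytes = 1 GiB
constexpr uint64_t kDefaultDlaGlobalDramBytes = 1ULL << 29; // 536870912 bytes = 512 MiB

// Hypothetical settings struct (not the repository's BuilderSettings).
struct PoolSettings {
  uint64_t workspace_size = 0;
  uint64_t dla_sram_size = kDefaultDlaSramBytes;
  uint64_t dla_local_dram_size = kDefaultDlaLocalDramBytes;
  uint64_t dla_global_dram_size = kDefaultDlaGlobalDramBytes;
};

// Override a TensorRT pool limit only when the caller supplied a non-default
// value, leaving TensorRT's own defaults in place otherwise.
void apply_pool_limits(nvinfer1::IBuilderConfig* cfg, const PoolSettings& s) {
  if (s.workspace_size != 0) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, s.workspace_size);
  }
  if (s.dla_sram_size != kDefaultDlaSramBytes) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_MANAGED_SRAM, s.dla_sram_size);
  }
  if (s.dla_local_dram_size != kDefaultDlaLocalDramBytes) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_LOCAL_DRAM, s.dla_local_dram_size);
  }
  if (s.dla_global_dram_size != kDefaultDlaGlobalDramBytes) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_GLOBAL_DRAM, s.dla_global_dram_size);
  }
}
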
21 changes: 12 additions & 9 deletions core/conversion/converters/converter_util.cpp
@@ -207,13 +207,13 @@ nvinfer1::ITensor* clamp(
nvinfer1::ITensor* lower_bound,
nvinfer1::ITensor* upper_bound,
std::string const& name) {
-
auto max_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMAX, x, lower_bound, "max layer for " + name);
TORCHTRT_CHECK(max_layer, "Unable to create max layer for clamp");
LOG_DEBUG(ctx->logger, "Create " << max_layer->getName() << " for clamp");
auto max_itensor = max_layer->getOutput(0);

-auto min_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
+auto min_layer =
+add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
TORCHTRT_CHECK(min_layer, "Unable to create min layer for clamp");
LOG_DEBUG(ctx->logger, "Create " << min_layer->getName() << " for clamp");
auto min_itensor = min_layer->getOutput(0);
@@ -227,13 +227,13 @@ nvinfer1::ITensor* clamp_to_input_dim(
nvinfer1::ITensor* input_dim,
int nbdims,
std::string const& name) {
-
auto zero = torch::zeros({nbdims}).to(torch::kI32);
auto zero_itensor = tensor_to_const(ctx, zero);
auto one = torch::ones({nbdims}).to(torch::kI32);
auto one_itensor = tensor_to_const(ctx, one);

-auto upper_bound_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, input_dim, one_itensor, "sub layer for " + name);
+auto upper_bound_layer =
+add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, input_dim, one_itensor, "sub layer for " + name);
TORCHTRT_CHECK(upper_bound_layer, "Unable to create sub layer for clamp to inputDim");
LOG_DEBUG(ctx->logger, "Create " << upper_bound_layer->getName() << " for clamp to inputDim");
auto upper_bound = upper_bound_layer->getOutput(0);
Expand All @@ -243,7 +243,8 @@ nvinfer1::ITensor* clamp_to_input_dim(
LOG_DEBUG(ctx->logger, "Create " << max_layer->getName() << " for clamp to inputDim");
auto max_itensor = max_layer->getOutput(0);

-auto min_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
+auto min_layer =
+add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
TORCHTRT_CHECK(min_layer, "Unable to create min_layer for clamp to inputDim");
LOG_DEBUG(ctx->logger, "Create " << min_layer->getName() << " for clamp to inputDim");
auto min_itensor = min_layer->getOutput(0);
@@ -257,7 +258,6 @@ nvinfer1::ITensor* normalize_indices(
nvinfer1::ITensor* indices,
int nbdims,
std::string const& name) {
-
auto zero = torch::zeros({nbdims}).to(torch::kI32);
auto neg = -torch::ones({nbdims}).to(torch::kI32);
auto zero_itensor = tensor_to_const(ctx, zero);
@@ -307,17 +307,20 @@ nvinfer1::ITensor* get_slice_size(
at::Tensor one_tensor = torch::ones({nbdims}).to(torch::kI32);
auto one_itensor = tensor_to_const(ctx, one_tensor);

-auto sub_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, end, start, "get_slice_size sub layer for " + name);
+auto sub_layer =
+add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, end, start, "get_slice_size sub layer for " + name);
TORCHTRT_CHECK(sub_layer, "Unable to create sub layer in calculate_output_size");
LOG_DEBUG(ctx->logger, "Create " << sub_layer->getName() << " for calculate_output_size");
auto sub_itensor = sub_layer->getOutput(0);

-auto div_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, sub_itensor, stride, "get_slice_size div layer for " + name);
+auto div_layer = add_elementwise(
+ctx, nvinfer1::ElementWiseOperation::kDIV, sub_itensor, stride, "get_slice_size div layer for " + name);
TORCHTRT_CHECK(div_layer, "Unable to create div layer in calculate_output_size");
LOG_DEBUG(ctx->logger, "Create " << div_layer->getName() << " for calculate_output_size");
auto div_itensor = div_layer->getOutput(0);

-auto add_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, div_itensor, one_itensor, "get_slice_size sum layer for " + name);
+auto add_layer = add_elementwise(
+ctx, nvinfer1::ElementWiseOperation::kSUM, div_itensor, one_itensor, "get_slice_size sum layer for " + name);
TORCHTRT_CHECK(add_layer, "Unable to create add layer in calculate_output_size");
LOG_DEBUG(ctx->logger, "Create " << add_layer->getName() << " for calculate_output_size");
auto size_itensor = add_layer->getOutput(0);
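The clamp helpers reformatted above compose the operation from two TensorRT elementwise layers, max(x, lower_bound) followed by min(., upper_bound), so clamp(x, lo, hi) = min(max(x, lo), hi). Below is a minimal standalone sketch of that composition against the public TensorRT API; it uses INetworkDefinition::addElementWise directly rather than the repository's add_elementwise helper, and the clamp_sketch name is illustrative only.

#include <NvInfer.h>

// clamp(x, lo, hi) = min(max(x, lo), hi), built from two elementwise layers.
nvinfer1::ITensor* clamp_sketch(
    nvinfer1::INetworkDefinition* network,
    nvinfer1::ITensor* x,
    nvinfer1::ITensor* lower_bound,
    nvinfer1::ITensor* upper_bound) {
  // max(x, lower_bound) clips everything below the lower bound.
  auto* max_layer = network->addElementWise(*x, *lower_bound, nvinfer1::ElementWiseOperation::kMAX);
  if (max_layer == nullptr) {
    return nullptr;
  }
  // min(max_result, upper_bound) clips everything above the upper bound.
  auto* min_layer = network->addElementWise(
      *max_layer->getOutput(0), *upper_bound, nvinfer1::ElementWiseOperation::kMIN);
  if (min_layer == nullptr) {
    return nullptr;
  }
  return min_layer->getOutput(0);
}
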
2 changes: 1 addition & 1 deletion core/conversion/converters/converter_util.h
@@ -1,8 +1,8 @@
#pragma once

+#include <limits>
#include <map>
#include <string>
-#include <limits>

#include "core/conversion/conversionctx/ConversionCtx.h"
#include "core/conversion/converters/Weights.h"
(The remaining changed files are not shown.)