Merge pull request #29 from narendasan/mobilenet
Add operators necessary for mobilenet
narendasan authored Mar 31, 2020
2 parents 79c909c + ca42ef5 commit aef6003
Showing 33 changed files with 841 additions and 323 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -13,4 +13,4 @@ experiments/
py/build/
py/tmp/
py/.eggs
.vscode/
66 changes: 34 additions & 32 deletions core/conversion/conversion.cpp
@@ -10,7 +10,7 @@ namespace trtorch {
namespace core {
namespace conversion {

// Defined in core/conversion/conversion_blacklist.cpp
bool isNodeConversionBlacklisted(const torch::jit::Node* n);

bool OpSupported(const torch::jit::Node* n) {
@@ -24,8 +24,8 @@ c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::
// Also probably a better way to deal with the two error cases;
TRTORCH_CHECK(level < limit, "Failed to evaluate node: " << *n \
<< "Reason: Exceeded evaluation stack limit (limit=" \
<< limit << ")");

LOG_DEBUG(ctx->logger, "Evaluating " << util::node_info(n));
evaluators::kwargs eval_args;
for (auto eval_in : n->inputs()) {
@@ -55,7 +55,7 @@ c10::optional<torch::jit::IValue> EvaluateNode(ConversionCtx* ctx, const torch::
return eval;
}

-bool AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
+void AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
LOG_INFO(ctx->logger,
"Adding Layer " << util::node_info(n) << " (ctx.AddLayer)");
converters::args node_args;
@@ -87,36 +87,34 @@ bool AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
TRTORCH_THROW_ERROR("Unable to retrieve all node inputs for node: " \
<< util::node_info(n) << " (ctx.AddLayer)\nSpecifically failed to retrieve value for input: " \
<< *input_node);
-return false;
}

}

if (n->inputs().size() != node_args.size()) {
TRTORCH_THROW_ERROR("Unable to retrieve all node inputs for node: " << *n);
-return false;
}


auto schema = n->maybeSchema();
TRTORCH_CHECK(schema, "Unable to get schema for Node " << util::node_info(n) \
<< " (conversion.AddLayer)");

auto converter = converters::get_node_converter_for(schema);
TRTORCH_CHECK(converter, "Unable to convert node: " << util::node_info(n) \
<< " (conversion.AddLayer)\nSchema: " << *schema
<< "\nConverter for " << schema->name()
<< " requested, but no such converter was found.\nIf you need a converter for this operator, you can try implementing one yourself\n"
<< "or request a converter: https://www.github.com/NVIDIA/TRTorch/issues");
converter(ctx, n, node_args);
<< "or request a converter: https://www.github.com/NVIDIA/TRTorch/issues");

-return true;
+TRTORCH_CHECK(converter(ctx, n, node_args),
+              "Converter for " << *schema << " failed to convert node: "
+              << util::node_info(n) << "please report this error to https://www.github.com/NVIDIA/TRTorch/issues");
}

-bool AddInputs(ConversionCtx* ctx,
+void AddInputs(ConversionCtx* ctx,
at::ArrayRef<const torch::jit::Value*> inputs,
std::vector<InputRange>& input_dims) {

auto type_lut = torch::jit::script::string_to_type_lut();
std::vector<const torch::jit::Value*> input_tensors;
for (auto in : inputs) {
@@ -130,15 +128,15 @@ bool AddInputs(ConversionCtx* ctx,
input_tensors.push_back(in);
}
}

TRTORCH_CHECK(input_tensors.size() == input_dims.size(),
"Expected dimension specifications for all input tensors" \
<< ", but found " << input_tensors.size() \
<< " input tensors and " \
<< input_dims.size() << "dimension specs (conversion.AddInputs)");

auto profile = ctx->builder->createOptimizationProfile();

for (size_t i = 0; i < input_tensors.size(); i++) {
auto in = input_tensors[i];
auto dims = input_dims[i];
@@ -158,20 +156,23 @@ bool AddInputs(ConversionCtx* ctx,
}

TRTORCH_CHECK(profile->isValid(), "Optimization profile is invalid, please check the input range provided (conversion.AddInputs)");

ctx->cfg->addOptimizationProfile(profile);
-return true;
}

-bool MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
+void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outputs) {
for (auto out : outputs) {
-ctx->net->markOutput(*(ctx->value_tensor_map[out]));
+auto it = ctx->value_tensor_map.find(out);
+// Leaves the potential for unused outputs to be populated with nullptr "safely"
+TRTORCH_CHECK(it != ctx->value_tensor_map.end() && it->second,
+              "No corresponding output TRT Tensor found for TorchScript output: " << out->debugName());
+auto out_tensor = it->second;
+ctx->net->markOutput(*out_tensor);
LOG_INFO(ctx->logger,
"Marking Output " << out->debugName() << " (ctx.MarkOutput)");
}
-return true;
}

void AddParamsToCtxValueMap(ConversionCtx* ctx, GraphParams& params) {
for (auto p : params) {
ctx->evaluated_value_map[p.first] = torch::jit::IValue(p.second.clone());
@@ -191,13 +192,8 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
bool to_eval = evaluators::shouldEvalAtConversionTime(n);
bool blacklisted = isNodeConversionBlacklisted(n);
if (!to_eval && !blacklisted) {
-if (!AddLayer(ctx, n)) {
-  //TODO: Exception things
-  LOG_ERROR(ctx->logger,
-            "Failed to add layer: " << *n \
-            << " (ctx.AddLayer)");
-  return;
-}
+// Should error out if something fails
+AddLayer(ctx, n);
} else {
std::string reason = "";
if (to_eval) {
@@ -207,7 +203,13 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
reason += " (explicitly blacklisted)";
}
LOG_DEBUG(ctx->logger,
"Skipping Node: " << (n->kind().toQualString()) << reason);
"Skipping Node: " << util::node_info(n) << reason);
}
}

+for (const auto n : nodes) {
+  if (converters::node_is_convertable(n)) {
+    ctx->CheckLayerAddition(n);
+  }
+}

Expand All @@ -218,7 +220,7 @@ void ConvertBlockToNetDef(ConversionCtx* ctx, const torch::jit::Block* b, ExtraI
// Converts an already lowered block (blocks with no sub blocks) to
// a serialized TensorRT engine that can be deserialized and run

// Probably should consolidate these two functions
std::string ConvertBlockToEngine(const torch::jit::Block* b, ExtraInfo build_info, GraphParams& static_params) {
ConversionCtx ctx(build_info.engine_settings);
ConvertBlockToNetDef(&ctx, b, build_info, static_params);
@@ -247,7 +249,7 @@ bool VerifyConverterSupportForBlock(const torch::jit::Block* b) {
for (auto s : unsupported_ops) {
unsupported_msg << " - " << s << std::endl;
}
unsupported_msg << "You can either implement converters for these ops in your application or file a bug" << std::endl;
unsupported_msg << "You can either implement converters for these ops in your application or request implementation" << std::endl;
unsupported_msg << "https://www.github.com/nvidia/TRTorch/issues" << std::endl;
LOG_ERROR(unsupported_msg.str());
}
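
The conversion.cpp changes above move AddLayer (and its callers) from returning a bool to reporting converter failure by throwing. A minimal standalone C++ sketch of that check-and-throw pattern follows; SKETCH_CHECK and fake_converter are invented stand-ins for this example, not TRTorch's TRTORCH_CHECK macro or a real converter.

#include <iostream>
#include <sstream>
#include <stdexcept>

// Stand-in for TRTORCH_CHECK: throws with a streamed message when cond is false.
#define SKETCH_CHECK(cond, msg)              \
  do {                                       \
    if (!(cond)) {                           \
      std::ostringstream ss;                 \
      ss << msg;                             \
      throw std::runtime_error(ss.str());    \
    }                                        \
  } while (0)

// Pretend converter: fails for one particular node id.
bool fake_converter(int node_id) { return node_id != 42; }

// Shape of the new AddLayer: no bool return; failure surfaces as an exception,
// so callers cannot silently ignore a converter that did not do its job.
void add_layer_sketch(int node_id) {
  SKETCH_CHECK(fake_converter(node_id),
               "Converter failed to convert node: " << node_id);
}

int main() {
  add_layer_sketch(1);  // succeeds silently
  try {
    add_layer_sketch(42);
  } catch (const std::exception& e) {
    std::cout << "caught: " << e.what() << "\n";  // caught: Converter failed to convert node: 42
  }
  return 0;
}
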
23 changes: 20 additions & 3 deletions core/conversion/conversionctx/ConversionCtx.cpp
@@ -37,11 +37,11 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
switch(settings.op_precision) {
case nvinfer1::DataType::kHALF:
cfg->setFlag(nvinfer1::BuilderFlag::kFP16);
input_type = nvinfer1::DataType::kHALF;
break;
// case nvinfer1::DataType::kINT8:
// cfg->setFlag(nvinfer1::BuilderFlag::kINT8);
// input_type = nvinfer1::DataType::kFLOAT;
// break;
case nvinfer1::DataType::kFLOAT:
default:
@@ -80,13 +80,30 @@ ConversionCtx::~ConversionCtx() {
free(ptr);
}
}


nvinfer1::ITensor* ConversionCtx::AssociateValueAndTensor(const torch::jit::Value* value, nvinfer1::ITensor* tensor) {
tensor->setName(value->debugName().c_str());
this->value_tensor_map[value] = tensor;
return tensor;
}

+std::string ConversionCtx::SerializeEngine() {
+  auto engine = builder->buildEngineWithConfig(*net, *cfg);
+  auto serialized_engine = engine->serialize();
+  return std::string((const char*)serialized_engine->data(), serialized_engine->size());
+}
+
+bool ConversionCtx::CheckLayerAddition(const torch::jit::Node* n) {
+  for (auto out : n->outputs()) {
+    auto iter = this->value_tensor_map.find(out);
+    if (iter == this->value_tensor_map.end()) {
+      LOG_WARNING("Node " << util::node_info(n) << " output: " << out->debugName() << " does not have a corresponding output, may potentially indicate a defective converter");
+      return false;
+    }
+  }
+  return true;
+}

} // namespace conversion
} // namespace core
} // namespace trtorch
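
The new CheckLayerAddition above verifies, after a converter runs, that every output of the node received an entry in value_tensor_map. A self-contained sketch of that lookup-validation pattern follows; FakeTensor and string keys are stand-ins for nvinfer1::ITensor* and torch::jit::Value*, invented for this example.

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct FakeTensor { std::string name; };  // stand-in for nvinfer1::ITensor

// After a converter runs, every node output should have an entry in the map;
// a missing or null entry suggests the converter silently dropped an output.
bool check_layer_addition(const std::vector<std::string>& node_outputs,
                          const std::unordered_map<std::string, FakeTensor*>& value_tensor_map) {
  for (const auto& out : node_outputs) {
    auto it = value_tensor_map.find(out);
    if (it == value_tensor_map.end() || it->second == nullptr) {
      std::cout << "output " << out << " has no corresponding tensor; "
                << "may indicate a defective converter\n";
      return false;
    }
  }
  return true;
}

int main() {
  FakeTensor t{"conv1_out"};
  std::unordered_map<std::string, FakeTensor*> map{{"conv1_out", &t}};
  std::cout << check_layer_addition({"conv1_out"}, map) << "\n";    // 1
  std::cout << check_layer_addition({"missing_out"}, map) << "\n";  // 0, with a warning
  return 0;
}
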
13 changes: 8 additions & 5 deletions core/conversion/conversionctx/ConversionCtx.h
@@ -30,12 +30,15 @@ struct BuilderSettings {

BuilderSettings() = default;
BuilderSettings(const BuilderSettings& other) = default;
friend std::ostream& operator<<(std::ostream& os, const BuilderSettings& s);
};

struct ConversionCtx {
ConversionCtx(BuilderSettings settings);
+std::string SerializeEngine();
nvinfer1::ITensor* AssociateValueAndTensor(const torch::jit::Value* value, nvinfer1::ITensor* tensor);
+bool CheckLayerAddition(const torch::jit::Node* n);

~ConversionCtx();

nvinfer1::IBuilder* builder;
@@ -50,12 +53,12 @@ struct ConversionCtx {
// is constructed from a PyTorch Tensor it allocates the data here to store a
// copy of the values
std::vector<void*> builder_resources;

std::unordered_map<const torch::jit::Value*, nvinfer1::ITensor*> value_tensor_map;
std::unordered_map<const torch::jit::Value*, torch::jit::IValue> evaluated_value_map;
};

} // namespace conversion
} // namespace core
} // namespace trtorch

49 changes: 32 additions & 17 deletions core/conversion/converters/Arg.cpp
@@ -85,9 +85,9 @@ std::string Arg::type_name() const {
default:
return "None";
}

}

const torch::jit::IValue* Arg::IValue() const {
if (type_ == Type::kIValue) {
return ptr_.ivalue;
@@ -150,7 +150,7 @@ double Arg::unwrapToDouble(double default_val) {

double Arg::unwrapToDouble() {
return this->unwrapTo<double>();
}

bool Arg::unwrapToBool(bool default_val) {
return this->unwrapTo<bool>(default_val);
@@ -194,26 +194,41 @@ c10::List<bool> Arg::unwrapToBoolList() {

template<typename T>
T Arg::unwrapTo(T default_val) {
-if (isIValue()) {
-  // TODO: implement Tag Checking
-  return ptr_.ivalue->to<T>();
-}
-LOG_DEBUG("In arg unwrapping, returning default value provided");
-return default_val;
+try {
+  return this->unwrapTo<T>();
+} catch(trtorch::Error& e) {
+  LOG_DEBUG("In arg unwrapping, returning default value provided (" << e.what() << ")");
+  return default_val;
+}
}


template<typename T>
T Arg::unwrapTo() {
-if (isIValue()) {
-  //TODO: Implement Tag checking
-  return ptr_.ivalue->to<T>();
-  //TODO: Exception
-  //LOG_INTERNAL_ERROR("Requested unwrapping of arg IValue assuming it was " << typeid(T).name() << " however type is " << ptr_.ivalue->type());
-}
-TRTORCH_THROW_ERROR("Requested unwrapping of arg assuming it was an IValue, however arg type is " << type_name());
-return T();
+TRTORCH_CHECK(isIValue(), "Requested unwrapping of arg assuming it was an IValue, however arg type is " << type_name());
+auto ivalue = ptr_.ivalue;
+bool correct_type = false;
+if (typeid(T) == typeid(double)) {
+  correct_type = ivalue->isDouble();
+} else if (typeid(T) == typeid(bool)) {
+  correct_type = ivalue->isBool();
+} else if (typeid(T) == typeid(int64_t)) {
+  correct_type = ivalue->isInt();
+} else if (typeid(T) == typeid(at::Tensor)) {
+  correct_type = ivalue->isTensor();
+} else if (typeid(T) == typeid(c10::Scalar)) {
+  correct_type = ivalue->isScalar();
+} else if (typeid(T) == typeid(c10::List<int64_t>)) {
+  correct_type = ivalue->isIntList();
+} else if (typeid(T) == typeid(c10::List<double>)) {
+  correct_type = ivalue->isDoubleList();
+} else if (typeid(T) == typeid(c10::List<bool>)) {
+  correct_type = ivalue->isBoolList();
+} else {
+  TRTORCH_THROW_ERROR("Requested unwrapping of arg to an unsupported type: " << typeid(T).name());
+}
+TRTORCH_CHECK(correct_type, "Requested unwrapping of arg IValue assuming it was " << typeid(T).name() << " however type is " << *(ptr_.ivalue->type()));
+return ptr_.ivalue->to<T>();
}


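
The reworked Arg::unwrapTo above splits unwrapping into a checked layer that throws on a tag mismatch and a defaulted overload that catches the error and falls back. A minimal sketch of that two-layer pattern follows, using std::variant as a stand-in for the IValue tag; unwrap_checked and unwrap_or are illustrative names invented for this example, not TRTorch APIs.

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <variant>

// Stand-in for a tagged IValue: the variant's active alternative is the "tag".
using SketchValue = std::variant<int64_t, double, bool>;

// Checked unwrap: verify the tag before extracting, throw on mismatch.
template <typename T>
T unwrap_checked(const SketchValue& v) {
  if (!std::holds_alternative<T>(v)) {
    throw std::runtime_error("requested type does not match stored tag");
  }
  return std::get<T>(v);
}

// Defaulted unwrap: delegate to the checked version and substitute the
// caller-provided default if it throws, mirroring the new unwrapTo(T default_val).
template <typename T>
T unwrap_or(const SketchValue& v, T default_val) {
  try {
    return unwrap_checked<T>(v);
  } catch (const std::exception& e) {
    std::cout << "returning default (" << e.what() << ")\n";
    return default_val;
  }
}

int main() {
  SketchValue v = 3.5;  // holds a double
  std::cout << unwrap_or<double>(v, 0.0) << "\n";          // 3.5
  std::cout << unwrap_or<int64_t>(v, int64_t{7}) << "\n";  // 7, after a logged mismatch
  return 0;
}
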
1 change: 1 addition & 0 deletions core/conversion/converters/BUILD
@@ -16,6 +16,7 @@ cc_library(
"impl/element_wise.cpp",
"impl/linear.cpp",
"impl/pooling.cpp",
"impl/reduce.cpp",
"impl/softmax.cpp",
"impl/unary.cpp",
],
18 changes: 9 additions & 9 deletions core/conversion/converters/NodeConverterRegistry.cpp
@@ -41,20 +41,20 @@ std::string canonical_schema_string(const torch::jit::FunctionSchema& schema) {
}

namespace {
-using ConverterLUT = std::unordered_map<torch::jit::Symbol, OpConverter>;
+using ConverterLUT = std::unordered_map<c10::OperatorName, OpConverter>;

class NodeConverterRegistry {
public:
bool RegisterConverter(torch::jit::FunctionSchema* signature, OpConverter& converter) {
LOG_DEBUG("Registering Converter for " << canonical_schema_string(*signature));
-auto sym = torch::jit::Symbol::fromQualString(signature->name());
-converter_lut_[sym] = std::move(converter);
+auto name = signature->operator_name();
+converter_lut_[name] = std::move(converter);
return true;
}

OpConverter GetConverter(const torch::jit::FunctionSchema* signature) {
-auto sym = torch::jit::Symbol::fromQualString(signature->name());
-auto iter = converter_lut_.find(sym);
+auto name = signature->operator_name();
+auto iter = converter_lut_.find(name);
if (iter == converter_lut_.end()) {
LOG_ERROR("Requested converter for " << signature->name() << ", but no such converter was found");
// ASK: Is there a better way than returning a nullptr?
@@ -66,8 +66,8 @@ class NodeConverterRegistry {
bool Convertable(const torch::jit::Node* n) {
auto schema = n->maybeSchema();
if (schema) {
-auto sym = torch::jit::Symbol::fromQualString(schema->name());
-auto iter = converter_lut_.find(sym);
+auto name = schema->operator_name();
+auto iter = converter_lut_.find(name);
if (iter == converter_lut_.end()) {
return false;
} else {
Expand All @@ -79,7 +79,7 @@ class NodeConverterRegistry {
return false;
}
}

private:
ConverterLUT converter_lut_;
};
@@ -111,7 +111,7 @@ OpConverter get_node_converter_for(const torch::jit::FunctionSchema* signature)
bool node_is_convertable(const torch::jit::Node* n) {
return get_converter_registry().Convertable(n);
}

RegisterNodeConversionPatterns&& RegisterNodeConversionPatterns::pattern(ConversionPattern p) && {
register_node_converter(std::move(p));
return std::move(*this);
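
Re-keying the converter LUT from torch::jit::Symbol to c10::OperatorName means the key now carries the overload name as well as the qualified name, so (presumably the motivation here) overloads such as aten::add.Tensor and aten::add.Scalar can register distinct converters instead of colliding on the bare symbol. A standalone sketch of the difference follows, with a std::pair standing in for c10::OperatorName, invented for this example.

#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Stand-in for c10::OperatorName: qualified name plus overload name.
using OpName = std::pair<std::string, std::string>;
using Converter = std::function<void()>;

int main() {
  std::map<OpName, Converter> lut;

  // With the overload in the key, both registrations coexist; keyed on the
  // bare symbol "aten::add" the second would have overwritten the first.
  lut[{"aten::add", "Tensor"}] = [] { std::cout << "tensor add converter\n"; };
  lut[{"aten::add", "Scalar"}] = [] { std::cout << "scalar add converter\n"; };

  lut.at({"aten::add", "Scalar"})();  // scalar add converter
  lut.at({"aten::add", "Tensor"})();  // tensor add converter
  return 0;
}
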