Skip to content

Commit

Permalink
[CPU] Convert, NMS nodes migration on nGraph (#37)
Browse files Browse the repository at this point in the history
  • Loading branch information
Maxim Andronov committed Apr 30, 2021
1 parent c3b371d commit 5432bc1
Show file tree
Hide file tree
Showing 8 changed files with 132 additions and 127 deletions.
4 changes: 2 additions & 2 deletions inference-engine/src/mkldnn_plugin/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ set(LAYERS
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_bin_conv_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_concat_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_conv_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_convert_node.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_crop_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_deconv_node.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_def_conv_node.cpp
Expand Down Expand Up @@ -50,7 +51,6 @@ set(LAYERS
${CMAKE_CURRENT_SOURCE_DIR}/nodes/list.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/batch_to_space.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/broadcast.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/convert.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/ctc_greedy.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/ctc_loss.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/depth_to_space.cpp
Expand All @@ -67,7 +67,7 @@ set(LAYERS
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/gather_nd.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/gather_tree.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/grn.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/non_max_suppression.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/non_max_suppression.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/log_softmax.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/math.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/one_hot.cpp
Expand Down
2 changes: 1 addition & 1 deletion inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
// { "Loop", TensorIterator },
// { "MemoryInput", MemoryInput}, // for construction from name ctor, arbitrary name is used
// { "Memory", MemoryOutput }, // for construction from layer ctor
// { "Convert", Convert },
{ "Convert", Convert },
{ "MVN", MVN},
{ "NormalizeL2", NormalizeL2},
{ "ScatterUpdate", ScatterUpdate},
Expand Down
4 changes: 4 additions & 0 deletions inference-engine/src/mkldnn_plugin/mkldnn_node.h
Original file line number Diff line number Diff line change
Expand Up @@ -591,6 +591,10 @@ class MKLDNNNode : public InferenceEngine::details::no_copy {
return originalInputPrecisions.size();
}

// Number of output ports declared on the original (pre-conversion) operation,
// derived from the recorded per-port output precisions.
size_t getOriginalOutputsNumber() const {
return originalOutputPrecisions.size();
}

// Algorithm kind assigned to this node; read-only accessor.
Algorithm getAlgorithm() const {
return algorithm;
}
Expand Down
6 changes: 6 additions & 0 deletions inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@
#include <transformations/op_conversions/log_softmax_decomposition.hpp>
#include <transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp>
#include <transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp>
#include <transformations/op_conversions/convert_previous_nms_to_nms_5.hpp>
#include <transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp>
#include <transformations/convert_precision.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/rt_info/fused_names_attribute.hpp>
Expand Down Expand Up @@ -128,6 +130,10 @@ static void Transformation(CNNNetwork& clonedNetwork, const Config& conf) {
manager.register_pass<ngraph::pass::LSTMCellDecomposition>();
manager.register_pass<ngraph::pass::GRUCellDecomposition>();
manager.register_pass<ngraph::pass::RNNCellDecomposition>();
manager.register_pass<ngraph::pass::ConvertNMS1ToNMS5>();
manager.register_pass<ngraph::pass::ConvertNMS3ToNMS5>();
manager.register_pass<ngraph::pass::ConvertNMS4ToNMS5>();
manager.register_pass<ngraph::pass::ConvertNMSToNMSIEInternal>();

std::vector<std::pair<ngraph::element::Type, ngraph::element::Type>> convert_precision_list{
{ngraph::element::i64, ngraph::element::i32},
Expand Down
2 changes: 1 addition & 1 deletion inference-engine/src/mkldnn_plugin/nodes/list_tbl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ MKLDNN_EXTENSION_NODE(PSROIPoolingImpl, DeformablePSROIPooling);
//MKLDNN_EXTENSION_NODE(SparseToDenseImpl, SparseToDense);
//MKLDNN_EXTENSION_NODE(ExperimentalDetectronROIFeatureExtractorImpl, ExperimentalDetectronROIFeatureExtractor);
//MKLDNN_EXTENSION_NODE(ONNXCustomProposalImpl, ExperimentalDetectronGenerateProposalsSingleImage);
//MKLDNN_EXTENSION_NODE(NonMaxSuppressionImpl, NonMaxSuppression);
MKLDNN_EXTENSION_NODE(NonMaxSuppressionImpl, NonMaxSuppressionIEInternal);
MKLDNN_EXTENSION_NODE(TopKImpl, TopK);
//MKLDNN_EXTENSION_NODE(ShuffleChannelsImpl, ShuffleChannels);
//MKLDNN_EXTENSION_NODE(SpaceToDepthImpl, SpaceToDepth);
Expand Down
60 changes: 35 additions & 25 deletions inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,34 @@
#include "mkldnn_convert_node.h"
#include "common/cpu_convert.h"
#include "common/tensor_desc_creator.h"

#define THROW_ERROR IE_THROW() << getTypeStr() << " layer with name '" << getName() <<"' ERROR: "
#include <ngraph/opsets/opset1.hpp>

using namespace mkldnn;
using namespace MKLDNNPlugin;
using namespace InferenceEngine;

// Checks whether the given nGraph operation can be handled by this node type.
// On rejection fills 'errorMessage' with the reason. Declared noexcept: any
// unexpected failure during the check is swallowed and reported as "unsupported".
bool MKLDNNConvertNode::isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept {
    try {
        if (std::dynamic_pointer_cast<const ngraph::opset1::Convert>(op) == nullptr) {
            errorMessage = "Only opset1 Convert operation is supported";
            return false;
        }
    } catch (...) {
        return false;
    }
    return true;
}

// Constructs the Convert node from its nGraph operation. Validates up front that
// the op is a supported opset1 Convert; otherwise throws NotImplemented so the
// plugin can fall back to another implementation.
// NOTE: the stale pre-change initializer line ("MKLDNNNode(op, eng, cache) {}")
// left over from the diff residue has been removed to restore a well-formed definition.
MKLDNNConvertNode::MKLDNNConvertNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) :
        MKLDNNNode(op, eng, cache) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
        IE_THROW(NotImplemented) << errorMessage;
    }
    // Prefix used by every error message emitted from this node.
    errorPrefix = "Convert node with name '" + getName() + "'";
}

void MKLDNNConvertNode::getSupportedDescriptors() {
// if tensor descriptors are set via setDescs method we need to update the inDims/outDims data
Expand All @@ -24,20 +43,15 @@ void MKLDNNConvertNode::getSupportedDescriptors() {
if (inDims.empty() && input && input->getLayout() != InferenceEngine::Layout::ANY)
inDims.push_back(MKLDNNDims(input->getDims()));
if (getParentEdges().size() != 1)
THROW_ERROR << "Incorrect number of input edges";
IE_THROW() << errorPrefix << " has incorrect number of input edges";
if (getChildEdges().empty())
THROW_ERROR << "Incorrect number of output edges";
IE_THROW() << errorPrefix << " has incorrect number of output edges";
}

void MKLDNNConvertNode::initSupportedPrimitiveDescriptors() {
if (!supportedPrimitiveDescriptors.empty())
return;

auto layer = getCnnLayer();
if (layer == nullptr) {
THROW_ERROR << "Cannot get CNN layer";
}

LayerConfig config;
DataConfig dataIn;
DataConfig dataConfigOut;
Expand All @@ -54,16 +68,11 @@ void MKLDNNConvertNode::initSupportedPrimitiveDescriptors() {
dataConfigOut.desc = TensorDesc(output->getPrecision(), input->getDims(), blockingDesc);
config.outConfs.push_back(dataConfigOut);
supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::unknown, MKLDNNMemoryDesc(config.outConfs.front().desc).getFormat());
} else if (layer->insData.size() == 1 && layer->outData.size() == 1) {
auto insData = layer->insData[0].lock();
if (nullptr == insData) {
THROW_ERROR << "Input data is empty";
}

const SizeVector& insDims = insData->getTensorDesc().getDims();
auto insPrecision = insData->getTensorDesc().getPrecision();
const SizeVector& outputDims = layer->outData[0]->getTensorDesc().getDims();
auto outPrecision = layer->outData[0]->getTensorDesc().getPrecision();
} else if (getOriginalInputsNumber() == 1 && getOriginalOutputsNumber() == 1) {
const SizeVector& insDims = getParentEdgeAt(0)->getDims().ToSizeVector();
auto insPrecision = getOriginalInputPrecisionAtPort(0);
const SizeVector& outputDims = getChildEdgeAt(0)->getDims().ToSizeVector();
auto outPrecision = getOriginalOutputPrecisionAtPort(0);

config.inConfs.push_back(dataIn);
config.outConfs.push_back(dataConfigOut);
Expand All @@ -78,26 +87,26 @@ void MKLDNNConvertNode::initSupportedPrimitiveDescriptors() {
supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::unknown, MKLDNNMemoryDesc(config.outConfs.front().desc).getFormat());
}
} else {
THROW_ERROR << "Incorrect number of input/output edges";
IE_THROW() << errorPrefix << " has incorrect number of input/output edges";
}
}

// Verifies that everything required for execution is in place: both the input
// and output edges have allocated memory and a primitive descriptor was selected.
// NOTE: the stale pre-change THROW_ERROR lines left over from the diff residue
// have been removed, keeping only the post-change IE_THROW error reporting.
void MKLDNNConvertNode::createPrimitive() {
    auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
    auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
    if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
        IE_THROW() << errorPrefix << " has not allocated destination memory";
    if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr())
        IE_THROW() << errorPrefix << " has not allocated input memory";
    if (getSelectedPrimitiveDescriptor() == nullptr)
        IE_THROW() << errorPrefix << " has nullable preferable primitive descriptor";
}

void MKLDNNConvertNode::execute(mkldnn::stream strm) {
auto& parentMem = getParentEdgeAt(0)->getMemory();
auto& childMem = getChildEdgeAt(0)->getMemory();
if (parentMem.GetElementsCount() != childMem.GetElementsCount())
THROW_ERROR << "Input and output buffers have different elements count";
IE_THROW() << errorPrefix << " has different elements number in input and output buffers";

void* srcPtr = parentMem.GetPtr();
void* dstPtr = childMem.GetPtr();
Expand All @@ -107,4 +116,5 @@ void MKLDNNConvertNode::execute(mkldnn::stream strm) {
// The node is considered successfully created when its resolved type is Convert.
bool MKLDNNConvertNode::created() const {
return getType() == Convert;
}

REG_MKLDNN_PRIM_FOR(MKLDNNConvertNode, Convert);
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,13 @@ class MKLDNNConvertNode : public MKLDNNNode {
// Accessors for the externally supplied tensor descriptors. They may be null;
// getSupportedDescriptors() explicitly guards for that case before use.
// NOTE(review): presumably populated via a setDescs method (referenced in
// getSupportedDescriptors' comment) — confirm against the full header.
std::shared_ptr<const InferenceEngine::TensorDesc> getInput() const { return input; }
std::shared_ptr<const InferenceEngine::TensorDesc> getOutput() const { return output; }

static bool isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept;

private:
std::shared_ptr<InferenceEngine::TensorDesc> input;
std::shared_ptr<InferenceEngine::TensorDesc> output;

std::string errorPrefix;
};
} // namespace MKLDNNPlugin

Loading

0 comments on commit 5432bc1

Please sign in to comment.