Skip to content

Commit

Permalink
Added tests for custom operation
Browse files — browse the repository at this point in the history
  • Loading branch information
ilyachur committed Jun 24, 2021
1 parent cdda7d7 commit 06d1a13
Show file tree
Hide file tree
Showing 8 changed files with 206 additions and 9 deletions.
10 changes: 3 additions & 7 deletions docs/template_extension/cpu_kernel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,6 @@ OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node>& node) {
IE_THROW() << "Cannot create implementation for operation with incorrect number of inputs or outputs!";
if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic())
IE_THROW() << "Cannot create implementation for op with dynamic shapes!";
if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4)
IE_THROW() << "Operation supports only 4d tensors for input and output.";
if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
IE_THROW() << "Operation supports only FP32 tensors.";
add = castedNode->getAddAttr();
Expand All @@ -41,7 +39,9 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
config.dynBatchSupport = false;
InferenceEngine::DataConfig inData;
InferenceEngine::DataConfig outData;
InferenceEngine::SizeVector order = {0, 1, 2, 3};
InferenceEngine::SizeVector order(inShape.size());
for (size_t i = 0; i < order.size(); i++)
order[i] = i;
// Allow any offset before data
size_t offset((std::numeric_limits<size_t>::max)());
if (planar) {
Expand Down Expand Up @@ -93,10 +93,6 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig&
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
}

if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
IE_THROW() << "Operation can be initialized only with 4d input/output tensors!";
}

if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
IE_THROW() << "Operation supports only FP32 precisions!";
Expand Down
7 changes: 5 additions & 2 deletions docs/template_extension/extension.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,12 +43,15 @@ Extension::Extension() {

//! [extension:dtor]
Extension::~Extension() {
try {
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain");
ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain");
#ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain");
ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain");
#endif // OPENCV_IMPORT_ENABLED
#endif // NGRAPH_ONNX_IMPORT_ENABLED
} catch (...) {
}
}
//! [extension:dtor]

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/custom_operation.hpp"

using namespace LayerTestsDefinitions;

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};

const std::vector<std::vector<size_t>> inputShapes = {
{1, 3},
{2, 5},
{1, 3, 10},
{1, 3, 1, 1},
{2, 5, 4, 4},
};


const auto customOpParams = testing::Combine(
testing::ValuesIn(netPrecisions),
testing::ValuesIn(inputShapes),
testing::Values(CommonTestUtils::DEVICE_TEMPLATE)
);

INSTANTIATE_TEST_CASE_P(
smoke_CustomOperation,
CustomOpLayerTest,
customOpParams,
CustomOpLayerTest::getTestCaseName
);

} // namespace


Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/custom_operation.hpp"

using namespace LayerTestsDefinitions;

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};

const std::vector<std::vector<size_t>> inputShapes = {
{1, 3},
{2, 5},
{1, 3, 10},
{1, 3, 1, 1},
{2, 5, 4, 4},
};


const auto customOpParams = testing::Combine(
testing::ValuesIn(netPrecisions),
testing::ValuesIn(inputShapes),
testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(
smoke_CustomOperation,
CustomOpLayerTest,
customOpParams,
CustomOpLayerTest::getTestCaseName
);

} // namespace

Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_layer/custom_operation.hpp"
#include "ngraph_functions/builders.hpp"

namespace LayerTestsDefinitions {

// Single smoke scenario: run the parametrized network containing the custom
// operation and compare plugin output against the reference implementation
// (comparison is performed inside LayerTestsCommon::Run()).
TEST_P(CustomOpLayerTest, CompareWithRefs) {
Run();
}

} // namespace LayerTestsDefinitions

Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,10 @@ list(APPEND EXPORT_DEPENDENCIES
ngraphFunctions
)

set(DEPENDENCIES
template_extension
)

addIeTarget(
NAME ${TARGET_NAME}
TYPE STATIC
Expand All @@ -26,6 +30,7 @@ addIeTarget(
${EXPORT_DEPENDENCIES}
EXPORT_DEPENDENCIES
${EXPORT_DEPENDENCIES}
DEPENDENCIES ${DEPENDENCIES}
)

# CVS-55373
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"

namespace LayerTestsDefinitions {
// Parameters that define one test instance of the custom-operation test.
typedef std::tuple<
InferenceEngine::Precision, // Net precision
InferenceEngine::SizeVector, // Input shapes
LayerTestsUtils::TargetDevice // Target device name
> CustomOpLayerParams;

// Shared (CPU/template) single-layer test for the "Template" custom operation
// provided by the template_extension library. The fixture builds a one-op
// ngraph function in SetUp() and relies on LayerTestsCommon for execution
// and reference comparison.
class CustomOpLayerTest: public testing::WithParamInterface<CustomOpLayerParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
// Formats a human-readable test name from shape, precision and device.
static std::string getTestCaseName(const testing::TestParamInfo<CustomOpLayerParams>& obj);

// Supplies deterministic input data (see the definition for the pattern).
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;

// Registers the template extension with the Core before any test runs.
CustomOpLayerTest();

protected:
// Builds the single-custom-op ngraph function for the current parameters.
void SetUp() override;
};

} // namespace LayerTestsDefinitions

Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/custom_operation.hpp"
#include <ngraph/ngraph.hpp>
#include <ie_core.hpp>
#include <file_utils.h>

using namespace LayerTestsDefinitions;

// Builds the platform-specific file name of the template_extension
// shared library (IE_BUILD_POSTFIX distinguishes debug/release builds).
static std::string get_extension_path() {
    const std::string baseName = std::string("template_extension") + IE_BUILD_POSTFIX;
    return FileUtils::makePluginLibraryName<char>({}, baseName);
}

// Lazily creates a process-wide singleton extension loaded from the
// template_extension library; if a Core pointer is passed on the call that
// first creates the extension, the extension is also registered with it.
// NOTE(review): a non-null `core` passed after the extension already exists
// is silently ignored — this assumes the Core instance is cached/shared so
// registration happened on first use; confirm if multiple Core objects can
// reach this helper.
static const InferenceEngine::IExtensionPtr& get_extension(InferenceEngine::Core* core = nullptr) {
static InferenceEngine::IExtensionPtr extension;
if (!extension) {
// Core is created from the cache, so create a singleton extension
extension = std::make_shared<InferenceEngine::Extension>(get_extension_path());
if (core) {
core->AddExtension(extension);
}
}
return extension;
}

// Registers the template extension with the Inference Engine Core (the
// `core` member comes from LayerTestsCommon) before any test body runs, so
// the custom operation can be created and executed by the plugins.
CustomOpLayerTest::CustomOpLayerTest(): LayerTestsUtils::LayerTestsCommon() {
get_extension(core.get());
}

// Produces a readable parametrized-test name: input shape, network
// precision and target device, e.g. "IS=(1.3)_netPRC=FP32_trgDev=CPU".
std::string CustomOpLayerTest::getTestCaseName(const testing::TestParamInfo<CustomOpLayerParams>& obj) {
    InferenceEngine::Precision precision;
    InferenceEngine::SizeVector shapes;
    std::string device;
    std::tie(precision, shapes, device) = obj.param;

    std::ostringstream name;
    name << "IS=" << CommonTestUtils::vec2str(shapes) << "_"
         << "netPRC=" << precision.name() << "_"
         << "trgDev=" << device;
    return name.str();
}

// Supplies deterministic input data so runs are reproducible across devices.
// NOTE(review): arguments presumably mean range=3, start=0, step=1 for the
// fill pattern — confirm against FuncTestUtils::createAndFillBlobConsistently.
InferenceEngine::Blob::Ptr CustomOpLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
return FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), 3, 0, 1);
}

// Builds the test function: a single custom "Template" operation (obtained
// from the extension's "custom_opset") wired between one Parameter and one
// Result, using the parametrized precision and input shape.
void CustomOpLayerTest::SetUp() {
InferenceEngine::Precision netPrecision;
InferenceEngine::SizeVector inputShapes;
std::tie(netPrecision, inputShapes, targetDevice) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

// One network input with the parametrized shape.
auto params = ngraph::builder::makeParams(ngPrc, {inputShapes});

// Create the op through the opset factory exposed by the extension; fail
// fast if the extension does not provide "Template" in "custom_opset".
std::shared_ptr<ngraph::Node> customOp;
ASSERT_NO_THROW(customOp.reset(get_extension()->getOpSets()["custom_opset"].create("Template")));
customOp->set_argument(0, params[0]);
// Let the op infer its output type/shape from the freshly attached input.
customOp->validate_and_infer_types();

ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(customOp)};
function = std::make_shared<ngraph::Function>(results, params, "CustomOpInference");
}

0 comments on commit 06d1a13

Please sign in to comment.