Commit

test

eshoguli committed Jul 17, 2023
1 parent 1f294b0 commit d062489

Showing 6 changed files with 269 additions and 4 deletions.
@@ -0,0 +1,48 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <vector>
#include <string>

#include "test_utils/cpu_test_utils.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"

using namespace CPUTestUtils;

namespace SubgraphTestsDefinitions {

using commonConvParams = std::tuple<
    InferenceEngine::SizeVector,    // Kernel size
    InferenceEngine::SizeVector,    // Strides
    std::vector<ptrdiff_t>,         // Pad begin
    std::vector<ptrdiff_t>,         // Pad end
    InferenceEngine::SizeVector,    // Dilation
    size_t,                         // Num out channels
    ngraph::op::PadType,            // Padding type
    size_t                          // Number of groups
>;

using convConcatCPUParams = std::tuple<
    nodeType,                       // Ngraph convolution type
    commonConvParams,               // Convolution params
    InferenceEngine::SizeVector     // Input shapes
>;

class ConvWithZeroPointFuseSubgraphTest : public testing::WithParamInterface<convConcatCPUParams>,
                                          public CPUTestsBase,
                                          virtual public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<convConcatCPUParams> obj);

protected:
    void SetUp() override;
    std::string pluginTypeNode;
};

} // namespace SubgraphTestsDefinitions
@@ -0,0 +1,173 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "ngraph/opsets/opset1.hpp"
#include "test_utils/convolution_params.hpp"
#include "subgraph_tests/include/conv_with_zero_point_fuse.hpp"

using namespace InferenceEngine;
using namespace CPUTestUtils;

namespace SubgraphTestsDefinitions {

std::string ConvWithZeroPointFuseSubgraphTest::getTestCaseName(testing::TestParamInfo<convConcatCPUParams> obj) {
    std::ostringstream result;
    nodeType type;
    commonConvParams convParams;
    SizeVector inputShapes;
    std::tie(type, convParams, inputShapes) = obj.param;

    result << "Type=" << nodeType2str(type) << "_";

    SizeVector kernelSize, strides, dilation;
    std::vector<ptrdiff_t> padBegin, padEnd;
    size_t numOutChannels, numOfGroups;
    ngraph::op::PadType paddingType;
    std::tie(kernelSize, strides, padBegin, padEnd, dilation, numOutChannels, paddingType, numOfGroups) = convParams;

    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
    result << "K" << CommonTestUtils::vec2str(kernelSize) << "_";
    result << "S" << CommonTestUtils::vec2str(strides) << "_";
    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
    result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
    result << "D=" << CommonTestUtils::vec2str(dilation) << "_";
    result << "O=" << numOutChannels << "_";
    result << "G=" << numOfGroups << "_";
    result << "AP=" << paddingType << "_";

    return result.str();
}

void ConvWithZeroPointFuseSubgraphTest::SetUp() {
    targetDevice = CommonTestUtils::DEVICE_CPU;
    nodeType type;
    commonConvParams convParams;
    SizeVector inputShapes;

    std::tie(type, convParams, inputShapes) = this->GetParam();
    pluginTypeNode = nodeType2PluginType(type);
    SizeVector kernelSize, strides, dilation;
    std::vector<ptrdiff_t> padBegin, padEnd;
    size_t numOutChannels, numOfGroups;
    ngraph::op::PadType paddingType;

    std::tie(kernelSize, strides, padBegin, padEnd, dilation, numOutChannels, paddingType, numOfGroups) = convParams;
    selectedType = ".*_I8";

    auto inputParams = ngraph::builder::makeParams(ngraph::element::f32, {inputShapes});
    const auto fq = ngraph::builder::makeFakeQuantize(
        inputParams[0],
        ov::element::f32,
        256,
        {1, 1, 1, 1},
        {-12.8f},
        {12.7f},
        {-12.8f},
        {12.7f});

    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(inputParams));

    std::vector<std::shared_ptr<ngraph::Node>> branches(2);
    {
        ngraph::Strides strides{1, 1};
        ngraph::Shape pads_begin{0, 0}, pads_end{0, 0}, kernel{1, 1};
        branches[0] = std::make_shared<ngraph::opset1::MaxPool>(fq,
                                                                strides,
                                                                pads_begin,
                                                                pads_end,
                                                                kernel);
    }
    {
        const auto fq_conv_data = ngraph::builder::makeFakeQuantize(
            fq,
            ov::element::f32,
            256,
            {1, 1, 1, 1},
            {-12.8},
            {12.7},
            {-12.8},
            {12.7});

        const InferenceEngine::SizeVector weights_const_shape = {numOutChannels, inputShapes[1], kernelSize[0], kernelSize[1]};
        const auto weights_const_values = std::vector<int>(ngraph::shape_size(weights_const_shape), 1);
        const auto weights_const = ngraph::builder::makeConstant(ov::element::i8, weights_const_shape, weights_const_values);

        const auto weights_convert = ngraph::builder::makeConversion(
            weights_const,
            ov::element::f32,
            ngraph::helpers::ConversionTypes::CONVERT);

        const auto weights_multiply = std::make_shared<ov::opset10::Multiply>(
            weights_convert,
            ngraph::builder::makeConstant(ov::element::f32,
                                          {numOutChannels, 1, 1, 1},
                                          std::vector<float>(numOutChannels, 1.0)));

        switch (type) {
            case nodeType::convolution: {
                branches[1] = ngraph::builder::makeConvolution(fq_conv_data,
                                                               weights_multiply,
                                                               ngraph::element::f32,
                                                               kernelSize,
                                                               strides,
                                                               padBegin,
                                                               padEnd,
                                                               dilation,
                                                               paddingType,
                                                               numOutChannels);
                break;
            }
            case nodeType::groupConvolution: {
                branches[1] = ngraph::builder::makeGroupConvolution(
                    fq_conv_data,
                    std::make_shared<ov::opset10::Reshape>(
                        weights_multiply,
                        ngraph::builder::makeConstant(
                            ov::element::i32,
                            {5},
                            std::vector<size_t>{1, numOutChannels, inputShapes[1], kernelSize[0], kernelSize[1]}),
                        true),
                    ngraph::element::f32,
                    strides,
                    padBegin,
                    padEnd,
                    dilation,
                    paddingType);
                break;
            }
            default: {
                throw std::runtime_error("Subgraph concat test doesn't support this type of operation");
            }
        }
    }

    auto concat = ngraph::builder::makeConcat(ngraph::OutputVector{branches[0], branches[1]}, 1);

    ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(concat)};
    function = std::make_shared<ngraph::Function>(results, inputParams, "ConvWithZeroPointFuseSubgraphTest");
}

TEST_P(ConvWithZeroPointFuseSubgraphTest, CompareWithRefs) {
    Run();

    CheckPluginRelatedResults(executableNetwork, pluginTypeNode);
};

const ngraph::op::PadType paddingType{ngraph::op::PadType::EXPLICIT};
const size_t numOutChannels{256};
const SizeVector inputShapes2D{1, 32, 136, 136};
const SizeVector dilation2D{1, 1};

commonConvParams convParams = commonConvParams{{1, 1}, {1, 1}, {0, 0}, {0, 0}, dilation2D, numOutChannels, paddingType, 1};

const auto params2DConv = ::testing::Combine(::testing::ValuesIn({nodeType::convolution, nodeType::groupConvolution}),
                                             ::testing::Values(convParams),
                                             ::testing::Values(inputShapes2D));

INSTANTIATE_TEST_SUITE_P(smoke_ConvWithZeroPointFuse,
                         ConvWithZeroPointFuseSubgraphTest,
                         params2DConv,
                         ConvWithZeroPointFuseSubgraphTest::getTestCaseName);

} // namespace SubgraphTestsDefinitions
@@ -224,7 +224,7 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr<const ov:
}

bool CPUTestsBase::primTypeCheck(std::string primType) const {
-    return selectedType.find(CPUTestsBase::any_type) != std::string::npos || selectedType == primType;
+    return selectedType.find(CPUTestsBase::any_type) != std::string::npos || std::regex_match(primType, std::regex(selectedType));
}
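The switch from exact string comparison to std::regex_match lets a test specify the expected primitive type as a pattern; the new subgraph test above relies on this by setting selectedType = ".*_I8". A minimal standalone sketch of the new matching behavior (not part of this commit; the primitive-type string "jit_avx512_I8" is a hypothetical example of what the CPU plugin might report):

#include <cassert>
#include <regex>
#include <string>

int main() {
    const std::string selectedType = ".*_I8";      // pattern used by ConvWithZeroPointFuseSubgraphTest
    const std::string primType = "jit_avx512_I8";  // hypothetical primitive type reported by the plugin

    // Old check: exact equality fails for a pattern.
    assert(selectedType != primType);

    // New check: regex_match accepts any primitive type whose name ends in "_I8".
    assert(std::regex_match(primType, std::regex(selectedType)));
    return 0;
}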

std::string CPUTestsBase::getTestCaseName(CPUSpecificParams params) {
@@ -423,7 +423,8 @@ void CheckNumberOfNodesWithType(InferenceEngine::ExecutableNetwork &execNet, con
    CheckNumberOfNodesWithTypes(execNet, {nodeType}, expectedCount);
}

-std::vector<CPUSpecificParams> filterCPUInfoForDevice(const std::vector<CPUSpecificParams>& CPUParams) {
+std::vector<CPUSpecificParams> filterCPUInfoForDevice(const std::vector<CPUSpecificParams>& CPUParams,
+                                                      const bool int8SupportOnly) {
    std::vector<CPUSpecificParams> resCPUParams;
    const int selectedTypeIndex = 3;

@@ -432,7 +433,7 @@ std::vector<CPUSpecificParams> filterCPUInfoForDevice(const std::vector<CPUSpeci

        if (selectedTypeStr.find("jit") != std::string::npos && !InferenceEngine::with_cpu_x86_sse42())
            continue;
-        if (selectedTypeStr.find("sse42") != std::string::npos && !InferenceEngine::with_cpu_x86_sse42())
+        if (selectedTypeStr.find("sse42") != std::string::npos && (!InferenceEngine::with_cpu_x86_sse42() || int8SupportOnly))
            continue;
        if (selectedTypeStr.find("avx") != std::string::npos && !InferenceEngine::with_cpu_x86_avx())
            continue;
@@ -176,7 +176,7 @@ const std::map<std::string, std::string> cpuBF16PluginConfig =

// utility functions
std::vector<CPUSpecificParams> filterCPUSpecificParams(const std::vector<CPUSpecificParams>& paramsVector);
-std::vector<CPUSpecificParams> filterCPUInfoForDevice(const std::vector<CPUSpecificParams>& CPUParams);
+std::vector<CPUSpecificParams> filterCPUInfoForDevice(const std::vector<CPUSpecificParams>& CPUParams, const bool int8SupportOnly = false);
void CheckNumberOfNodesWithType(const ov::CompiledModel &compiledModel, const std::string& nodeType, size_t expectedCount);
void CheckNumberOfNodesWithType(InferenceEngine::ExecutableNetwork &execNet, const std::string& nodeType, size_t expectedCount);
void CheckNumberOfNodesWithTypes(const ov::CompiledModel &compiledModel, const std::unordered_set<std::string>& nodeTypes, size_t expectedCount);
@@ -96,6 +96,19 @@ std::shared_ptr<ngraph::Node> makeConvolution(const ngraph::Output<Node> &in,
                                              const std::vector<float> &filterWeights = {},
                                              const std::vector<float> &biasesWeights = {});

std::shared_ptr<ngraph::Node> makeConvolution(const ngraph::Output<Node>& in_data,
                                              const ngraph::Output<Node>& in_weights,
                                              const element::Type& type,
                                              const std::vector<size_t>& filterSize,
                                              const std::vector<size_t>& strides,
                                              const std::vector<ptrdiff_t>& padsBegin,
                                              const std::vector<ptrdiff_t>& padsEnd,
                                              const std::vector<size_t>& dilations,
                                              const op::PadType& autoPad,
                                              size_t numOutChannels,
                                              bool addBiases = false,
                                              const std::vector<float>& biasesWeights = {});

std::shared_ptr<ngraph::Node> makeGroupConvolution(const ngraph::Output<Node> &in,
                                                   const element::Type &type,
                                                   const std::vector<size_t> &filterSize,
30 changes: 30 additions & 0 deletions src/tests/ngraph_helpers/ngraph_functions/src/convolution.cpp
@@ -39,5 +39,35 @@ std::shared_ptr<Node> makeConvolution(const ngraph::Output<Node> &in,
    }
}

std::shared_ptr<Node> makeConvolution(const ngraph::Output<Node>& in_data,
                                      const ngraph::Output<Node>& in_weights,
                                      const element::Type &type,
                                      const std::vector<size_t> &filterSize,
                                      const std::vector<size_t> &strides,
                                      const std::vector<ptrdiff_t> &padsBegin,
                                      const std::vector<ptrdiff_t> &padsEnd,
                                      const std::vector<size_t> &dilations,
                                      const op::PadType &autoPad,
                                      size_t numOutChannels,
                                      bool addBiases,
                                      const std::vector<float> &biasesWeights) {
    auto shape = in_data.get_partial_shape();
    auto conv = std::make_shared<opset1::Convolution>(in_data,
                                                      in_weights,
                                                      strides,
                                                      padsBegin,
                                                      padsEnd,
                                                      dilations,
                                                      autoPad);
    if (addBiases) {
        bool randomBiases = biasesWeights.empty();
        auto biasesWeightsNode = makeConstant(type, {1, numOutChannels, 1, 1}, biasesWeights, randomBiases);
        auto add = std::make_shared<ngraph::opset1::Add>(conv, biasesWeightsNode);
        return add;
    } else {
        return conv;
    }
}

} // namespace builder
} // namespace ngraph
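For reference, a hypothetical usage sketch of the new makeConvolution overload added above (not part of this commit): the caller supplies the weights as an ngraph::Output<Node>, here a plain f32 constant, instead of letting the builder generate them internally. The helper name buildTinyConvModel and the shapes are illustrative only.

#include "ngraph/opsets/opset1.hpp"
#include "ngraph_functions/builders.hpp"

std::shared_ptr<ngraph::Function> buildTinyConvModel() {
    // 1x3x16x16 input, 8 output channels, 1x1 kernel; all weights fixed to 1.f for simplicity.
    auto data = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 16, 16});
    auto weights = ngraph::builder::makeConstant(ngraph::element::f32, {8, 3, 1, 1}, std::vector<float>(24, 1.f));
    auto conv = ngraph::builder::makeConvolution(data,                 // in_data
                                                 weights,              // in_weights supplied by the caller
                                                 ngraph::element::f32,
                                                 {1, 1},               // kernel
                                                 {1, 1},               // strides
                                                 {0, 0},               // pads begin
                                                 {0, 0},               // pads end
                                                 {1, 1},               // dilations
                                                 ngraph::op::PadType::EXPLICIT,
                                                 8);                   // num out channels
    return std::make_shared<ngraph::Function>(ngraph::OutputVector{conv},
                                              ngraph::ParameterVector{data});
}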
