fixed crash related to loading model with fq and sigmoid (openvinotoolkit#6644)

* fixed crash related to loading model with fq and sigmoid

* renamed multiple_input.* to multiple_input_fq.*; removed two unnecessary FQ layers from smoke_fq_fusion_with_sigmoid test; moved FQ params to test params
dmitriikhurtin authored and rnugmanx committed Aug 26, 2021
1 parent d4db8a2 commit 47dc594
Showing 7 changed files with 223 additions and 4 deletions.
8 changes: 5 additions & 3 deletions inference-engine/src/gna_plugin/gna_plugin.cpp
@@ -470,7 +470,6 @@ void GNAPlugin::UpdateInputScaleFromNetwork(InferenceEngine::CNNNetwork & networ
         auto data = input.second->getInputData();
         for (auto && nextToInputLayer : getInputTo(data)) {
             if (!LayerInfo(nextToInputLayer.second).isFakeQuantize()) {
-                inputIdx++;
                 continue;
             }
             // replacing scale factor from this fq layer
@@ -493,6 +492,9 @@ void GNAPlugin::UpdateInputScaleFromNetwork(InferenceEngine::CNNNetwork & networ
                 scaleInput = (fqLayer.getLevels() - 1) / (2 * maxAbsVal);
             }
 
+            IE_ASSERT(config.inputScaleFactors.size() > inputIdx);
+            IE_ASSERT(inputsDesc->inputScaleFactors.size() > inputIdx);
+
             if (!config.inputScaleFactors.empty()) {
                 gnalog() << "Scale factor calculated during model quantization (" << scaleInput
                     << ") will be used instead of user input (" << inputsDesc->inputScaleFactors[inputIdx] << ").\n";
@@ -505,9 +507,9 @@ void GNAPlugin::UpdateInputScaleFromNetwork(InferenceEngine::CNNNetwork & networ
 
             config.inputScaleFactors[inputIdx] = scaleInput;
             inputsDesc->inputScaleFactors[inputIdx] = scaleInput;
 
-            inputIdx++;
         }
 
+        inputIdx++;
     }
 }
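The crash this commit fixes comes from the index bookkeeping above: the old code advanced inputIdx once per consumer layer of an input, so an input feeding both a FakeQuantize and a Sigmoid advanced the index twice and overran the scale-factor vectors. Below is a minimal, hypothetical sketch of that pattern and the fix, not the plugin's real API:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Simplified stand-in: for each network input, the kinds of its consumer layers.
using Consumers = std::vector<std::string>;

void updateScaleFactors(const std::vector<Consumers>& inputs,
                        std::vector<float>& scaleFactors) {
    std::size_t inputIdx = 0;
    for (const auto& consumers : inputs) {
        for (const auto& layerType : consumers) {
            if (layerType != "FakeQuantize") {
                continue;  // the old code incremented inputIdx here and again
            }              // after handling a FakeQuantize consumer below
            // mirrors the IE_ASSERT bounds checks added by the commit
            assert(scaleFactors.size() > inputIdx);
            scaleFactors[inputIdx] = 1.0f;  // stand-in for the computed scale
        }
        ++inputIdx;  // the fix: advance once per network input, not per consumer
    }
}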

@@ -2177,7 +2177,7 @@ void MoveFakeQuantizeLayerIntoQuantParamsPass :: run() {
             }
 
             if (isFQFuseAllowed) {
-                getInputTo(prevData).clear();
+                getInputTo(prevData).erase(l->name);
             }
 
             // Connect all next layers after FQ to the layer that is before FQ
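This one-line pass change matters exactly when the data before the FQ has more than one consumer, as in the fq-plus-sigmoid model: clear() detached every reader of prevData, while erase(l->name) removes only the FQ layer being folded into the quantization params. A hedged illustration on a plain std::map, since getInputTo yields a name-to-layer map (the layer names below are made up):

#include <iostream>
#include <map>
#include <string>

int main() {
    // Stand-in for getInputTo(prevData): consumer-layer name -> layer kind.
    std::map<std::string, std::string> inputTo = {
        {"fq_1", "FakeQuantize"},
        {"sigmoid_1", "Sigmoid"},
    };

    // Old behavior: inputTo.clear() also dropped "sigmoid_1", leaving the
    // Sigmoid disconnected, which could break later graph passes.
    inputTo.erase("fq_1");  // new behavior: detach only the fused FQ layer

    for (const auto& entry : inputTo) {
        std::cout << entry.first << " still consumes the data ("
                  << entry.second << ")\n";
    }
}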
@@ -0,0 +1,101 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <ie_core.hpp>
#include "ngraph_functions/builders.hpp"
#include "common_test_utils/test_constants.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"

namespace LayerTestsDefinitions {

typedef std::tuple<
std::string, // Target device name
InferenceEngine::Precision, // Network precision
size_t, // level
std::pair<float, float>, // min, max
size_t, // Input size
std::map<std::string, std::string> // Configuration
> fqFusionWithSigmoidParams;

class FqFusionWithSigmoidTest : public LayerTestsUtils::LayerTestsCommon,
public testing::WithParamInterface<fqFusionWithSigmoidParams> {
protected:
void SetUp() override {
InferenceEngine::Precision netPrecision;
std::map<std::string, std::string> config;
size_t levelFq;
std::pair<float, float> minMaxFq;
size_t inputSize;
std::tie(targetDevice, netPrecision, levelFq, minMaxFq, inputSize, config) = this->GetParam();
configuration.insert(config.begin(), config.end());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

auto input = ngraph::builder::makeParams(ngPrc, {{1, inputSize}});
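// Graph built below: input -> Add(constant) -> Sigmoid; the Sigmoid output
// feeds both a Multiply (with the input) and a FakeQuantize, and the two
// branches are summed, reproducing the fq-plus-sigmoid pattern this commit fixes.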
auto constant = ngraph::builder::makeConstant(ngPrc, {1, inputSize}, std::vector<size_t>{1});
auto mul1 = ngraph::builder::makeEltwise(input[0], constant, ngraph::helpers::EltwiseTypes::ADD);
auto sigmoid1 = std::make_shared<ngraph::opset1::Sigmoid>(mul1);
auto mul2 = ngraph::builder::makeEltwise(input[0], sigmoid1, ngraph::helpers::EltwiseTypes::MULTIPLY);
auto fake3 = ngraph::builder::makeFakeQuantize(sigmoid1, ngPrc, levelFq,
{ 1 }, { minMaxFq.first }, { minMaxFq.second }, { minMaxFq.first }, { minMaxFq.second });
auto mul3 = ngraph::builder::makeEltwise(mul2, fake3, ngraph::helpers::EltwiseTypes::ADD);
auto result = std::make_shared<ngraph::opset7::Result>(mul3);
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, input, "fq_fusion_with_sigmoid");
}
public:
static std::string getTestCaseName(const testing::TestParamInfo<fqFusionWithSigmoidParams> &obj) {
std::string targetDevice;
InferenceEngine::Precision netPrecision;
size_t levelFq;
std::pair<float, float> minMaxFq;
size_t inputSize;
std::map<std::string, std::string> config;
std::tie(targetDevice, netPrecision, levelFq, minMaxFq, inputSize, config) = obj.param;
std::ostringstream result;
result << "netPrecision=" << netPrecision.name() << "_";
result << "IS=" << inputSize << "_";
result << "targetDevice=" << targetDevice << "_";
result << "levelFq=" << levelFq << "_";
result << "(minFq,maxFq)=" << std::to_string(minMaxFq.first) << "_" << std::to_string(minMaxFq.first) << "_";
result << "inputSize=" << std::to_string(inputSize);
return result.str();
}
}; // class FqFusionWithSigmoidTest

TEST_P(FqFusionWithSigmoidTest, CompareWithRefs) {
Run();
};

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};

std::vector<size_t> levelFq = {
65535
};

std::vector<std::pair<float, float>> minMaxFq = {
{-1, 1},
{-5, 5}
};

std::vector<size_t> input = {
64,
};

std::map<std::string, std::string> additional_config = {
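// GNA_SW_EXACT: software emulation mode intended to be bit-exact with GNA
// hardware results (assumption based on the plugin's device-mode naming).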
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
};

INSTANTIATE_TEST_SUITE_P(smoke_fq_fusion_with_sigmoid, FqFusionWithSigmoidTest,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(levelFq),
::testing::ValuesIn(minMaxFq),
::testing::ValuesIn(input),
::testing::Values(additional_config)),
FqFusionWithSigmoidTest::getTestCaseName);

} // namespace LayerTestsDefinitions
@@ -0,0 +1,26 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <subgraph_tests/multiple_input_fq.hpp>
#include "common_test_utils/test_constants.hpp"

namespace SubgraphTestsDefinitions {
namespace {
std::vector<size_t> input = {
64,
};

std::map<std::string, std::string> additional_config = {
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
};
} // namespace

INSTANTIATE_TEST_SUITE_P(smoke_multiple_input, MultipleInputTest,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::ValuesIn(input),
::testing::Values(additional_config)),
MultipleInputTest::getTestCaseName);
} // namespace SubgraphTestsDefinitions
@@ -0,0 +1,18 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#ifndef MULTIPLE_INPUT_HPP
#define MULTIPLE_INPUT_HPP

#include "shared_test_classes/subgraph/multiple_input_fq.hpp"

namespace SubgraphTestsDefinitions {

TEST_P(MultipleInputTest, CompareWithRefs) {
Run();
};

} // namespace SubgraphTestsDefinitions

#endif // MULTIPLE_INPUT_HPP
@@ -0,0 +1,29 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#ifndef SUBGRAPH_MULTIPLE_INPUT_HPP
#define SUBGRAPH_MULTIPLE_INPUT_HPP

#include "common_test_utils/test_common.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include <ie_core.hpp>

namespace SubgraphTestsDefinitions {
typedef std::tuple<
std::string, // Target device name
InferenceEngine::Precision, // Network precision
size_t, // Input size
std::map<std::string, std::string> // Configuration
> multipleInputParams;

class MultipleInputTest : public LayerTestsUtils::LayerTestsCommon,
public testing::WithParamInterface<multipleInputParams> {
protected:
void SetUp() override;
public:
static std::string getTestCaseName(const testing::TestParamInfo<multipleInputParams> &obj);
};
} // namespace SubgraphTestsDefinitions

#endif // SUBGRAPH_MULTIPLE_INPUT_HPP
@@ -0,0 +1,43 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/subgraph/multiple_input_fq.hpp"

namespace SubgraphTestsDefinitions {

std::string MultipleInputTest::getTestCaseName(const testing::TestParamInfo<multipleInputParams> &obj) {
std::string targetDevice;
InferenceEngine::Precision netPrecision;
size_t inputSize;
std::map<std::string, std::string> config;
std::tie(targetDevice, netPrecision, inputSize, config) = obj.param;
std::ostringstream result;
result << "netPrecision=" << netPrecision.name() << "_";
result << "IS=" << inputSize << "_";
result << "targetDevice=" << targetDevice;
return result.str();
}

void MultipleInputTest::SetUp() {
InferenceEngine::Precision netPrecision;
std::map<std::string, std::string> config;
size_t inputSize;
std::tie(targetDevice, netPrecision, inputSize, config) = this->GetParam();
configuration.insert(config.begin(), config.end());
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
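// Topology built below: each of the three inputs feeds its own FakeQuantize;
// inputs 0 and 1 are added to their quantized copies, those sums are added
// together, and the quantized third input is added to that result.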
auto input = ngraph::builder::makeParams(ngPrc, {{1, inputSize}, {1, inputSize}, {1, inputSize}});
auto fake1 = ngraph::builder::makeFakeQuantize(input[0], ngPrc, 255, { 1 }, { -0.5 }, { 0.5 }, { -0.5 }, { 0.5 });
auto mul1 = ngraph::builder::makeEltwise(input[0], fake1, ngraph::helpers::EltwiseTypes::ADD);
auto fake2 = ngraph::builder::makeFakeQuantize(input[1], ngPrc, 255, { 1 }, { -0.5 }, { 0.5 }, { -0.5 }, { 0.5 });
auto mul2 = ngraph::builder::makeEltwise(input[1], fake2, ngraph::helpers::EltwiseTypes::ADD);
auto mul3 = ngraph::builder::makeEltwise(mul1, mul2, ngraph::helpers::EltwiseTypes::ADD);
auto fake3 = ngraph::builder::makeFakeQuantize(input[2], ngPrc, 255, { 1 }, { -0.5 }, { 0.5 }, { -0.5 }, { 0.5 });
auto mul4 = ngraph::builder::makeEltwise(fake3, mul3, ngraph::helpers::EltwiseTypes::ADD);
auto result = std::make_shared<ngraph::opset7::Result>(mul4);
function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, input, "multiple_input");
}

} // namespace SubgraphTestsDefinitions
