diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp
index c40b97209e1075..4d565ca0c2d975 100644
--- a/inference-engine/src/gna_plugin/gna_plugin.cpp
+++ b/inference-engine/src/gna_plugin/gna_plugin.cpp
@@ -752,12 +752,14 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
         passes->registerPass();
         passes->registerPass();
+        passes->registerPass();
+        passes->registerPass();
+        passes->registerPass();
         passes->registerPass();
         passes->registerPass();
-        passes->registerPass();
         passes->registerPass();
         passes->registerPass();
         passes->registerPass();
@@ -775,7 +777,6 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
 #if GNA_LIB_VER == 2
         passes->registerPass();
 #endif
-        passes->registerPass();
         passes->registerPass();
         passIdx = passes->run(passIdx);
     };
diff --git a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
index ae731465025e05..8ab4af9f1083f9 100644
--- a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
+++ b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
@@ -1530,16 +1530,7 @@ void SubstituteScaleShiftBroadCastPass::run() {
             continue;
         }

-        // only 3d scaleshift supported where number of c is arbitrary
-        auto lastD = reshape_batch ? dataDims[1] : dataDims.back();
-        if (lastD != weightsElements) {
-            THROW_GNA_EXCEPTION << "Unsupported layer: " << l->name
-                                << " should have last dim(" << lastD << ") equal to weights(" << weightsElements << ") length";
-        }
-        if (dataDims.size() == 2) {
-            THROW_GNA_EXCEPTION << "For layer: " << l->name
-                                << " weights size(" << weightsElements<< ") invalid: should match input size of(" << lastD << ")";
-        }
+        // TODO: add broadcasting rules checks

         gnalog() << "Substitution ScaleShift broadcast for layer: " << l->name << "\n";
         if (nElements % scaleShift->_weights->size()) {
@@ -2220,6 +2211,17 @@ void TransposeWeightsFromNCHWToNHWCPass::run() {
         }
     };

+    auto transpInfoMatchWeightsSize = [](const std::vector<TranspositionInfo> &transpositionInfo, size_t weightsSize, const std::string &layerName) {
+        size_t totalElements = 0;
+        for (auto && transpositionInfoPart : transpositionInfo) {
+            totalElements += transpositionInfoPart.num_transpose_rows * transpositionInfoPart.num_transpose_columns;
+        }
+        if (totalElements != weightsSize) {
+            THROW_GNA_EXCEPTION << layerName << " weights elements from transposition info (" << totalElements
+                                << ") don't match input dimensions (" << weightsSize << ")";
+        }
+    };
+
     for (auto &&l : *pLayers) {
         if (LayerInfo(l).isScaleShift()) {
             std::vector<TranspositionInfo> transpositionInfo;
@@ -2237,6 +2239,10 @@ void TransposeWeightsFromNCHWToNHWCPass::run() {
             }
             auto weightable = dynamic_cast<WeightableLayer*>(l.get());
             IE_ASSERT(weightable != nullptr);
+
+            size_t totalWeights = weightable->_weights->size();
+            transpInfoMatchWeightsSize(transpositionInfo, totalWeights, l->name);
+
             ConvertTensorFromNCHWToNHWC(weightable->precision.size(), 1, weightable->_weights->size(),
                 weightable->_weights->cbuffer().as<uint8_t*>(), true, transpositionInfo);
             if (weightable->_biases) {
@@ -2270,14 +2276,9 @@ void TransposeWeightsFromNCHWToNHWCPass::run() {
                     // If we found a split it's not possible to rotate data
                     THROW_GNA_EXCEPTION << l->name << " won't be transposed due to a split before it";
                 }
-                size_t totalColumns = 0;
-                for (auto && transpositionInfoPart : transpositionInfo) {
-                    totalColumns += transpositionInfoPart.num_transpose_rows * transpositionInfoPart.num_transpose_columns;
-                }
-                if (weightsColumns != totalColumns) {
-                    THROW_GNA_EXCEPTION << l->name << " weights columns from transposition info (" << totalColumns
-                                        << ") don't match input dimensions (" << weightsColumns << ")";
-                }
+
+                transpInfoMatchWeightsSize(transpositionInfo, weightsColumns, l->name);
+
                 ConvertTensorFromNCHWToNHWC(precision, weightsRows, weightsColumns, weightable->_weights->cbuffer().as<uint8_t*>(),
                     true, transpositionInfo);
                 gnalog() << l->name << " weights rows transposition info:\n";
@@ -2297,14 +2298,9 @@ void TransposeWeightsFromNCHWToNHWCPass::run() {
                     // If we found a concat it's not possible to rotate data
                     THROW_GNA_EXCEPTION << l->name << " won't be transposed due to a concat after it";
                 }
-                size_t totalRows = 0;
-                for (const auto& transpositionInfoPart : transpositionInfo) {
-                    totalRows += transpositionInfoPart.num_transpose_rows * transpositionInfoPart.num_transpose_columns;
-                }
-                if (weightsRows != totalRows) {
-                    THROW_GNA_EXCEPTION << l->name << " weights rows from transposition info (" << totalRows
-                                        << ") don't match output dimensions (" << weightsRows << ")";
-                }
+
+                transpInfoMatchWeightsSize(transpositionInfo, weightsRows, l->name);
+
                 ConvertTensorFromNCHWToNHWC(precision, weightsRows, weightsColumns, weightable->_weights->cbuffer().as<uint8_t*>(),
                     false, transpositionInfo);
                 gnalog() << l->name << " weights columns transposition info:\n";
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp
index 589ec41c83dd8d..9d9436db568c5d 100644
--- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp
@@ -44,8 +44,6 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*ConstantResultSubgraphTest.*inPrc=(U8|I8|I32|U64|I64|BOOL).*)",
         // TODO: Issue 51528
         R"(.*CachingSupport.*_(u8|i16)_.*)",
-        // TODO: Issue 51525
-        R"(.*CachingSupport.*KSOFunction.*)",
         // TODO: Issue 57363 (Param -> Result subgraphs)
         R"(.*smoke_MemoryTest.*LOW_LATENCY.*iteration_count=1_.*)",
         // TODO: Issue 57368 (accuracy)
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/eltwise_reshape_activation.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/eltwise_reshape_activation.cpp
index 8e7af93c892d00..3e97b5b16f6b3d 100644
--- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/eltwise_reshape_activation.cpp
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/eltwise_reshape_activation.cpp
@@ -13,7 +13,8 @@ const std::vector<std::vector<std::vector<size_t>>> shapes = {
     {{1, 64}, {64, 1}},
     {{8, 256}, {16, 128}},
     {{6, 384}, {18, 128}},
-    {{8, 2048}, {32, 512}}
+    {{8, 2048}, {32, 512}},
+    {{2, 4, 64, 64}, {1, 8, 64, 64}}
 };

 const std::vector<InferenceEngine::Precision> netPrecisions = {