diff --git a/docs/template_plugin/src/template_infer_request.cpp b/docs/template_plugin/src/template_infer_request.cpp index 2f9d0446ca1fb5..94d5620001da3d 100644 --- a/docs/template_plugin/src/template_infer_request.cpp +++ b/docs/template_plugin/src/template_infer_request.cpp @@ -4,6 +4,9 @@ #include "template_infer_request.hpp" +#include +#include + #include #include #include @@ -45,8 +48,6 @@ TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& }; _executable = _executableNetwork->_plugin->_backend->compile(_executableNetwork->_function); - _parameters = _executableNetwork->_function->get_parameters(); - _results = _executableNetwork->_function->get_results(); allocateDeviceBuffers(); allocateBlobs(); @@ -65,35 +66,51 @@ void TemplateInferRequest::allocateDeviceBuffers() { _outputTensors.resize(_networkOutputs.size()); } +template +static void AllocateImplSingle(BlobMap& blobMap, + BlobMap& networkBlobMap, + const BlobData& blobData, + GetNetworkPrecisionF&& GetNetworkPrecision, + const SizeVector& dims) { + const auto& precision = blobData.second->getTensorDesc().getPrecision(); + auto layout = blobData.second->getTensorDesc().getLayout(); + if (dims.size() > 0 && layout == InferenceEngine::Layout::SCALAR) { + layout = InferenceEngine::Layout::ANY; + } + const auto deviceLayout = TensorDesc::getLayoutByDims(dims); + Blob::Ptr blob; + blob = make_blob_with_precision({precision, dims, layout}); + blob->allocate(); + blobMap[blobData.first] = blob; + + auto networkPresion = InferenceEngine::details::convertPrecision(GetNetworkPrecision(blobData.first)); + Blob::Ptr networkBlob; + if (precision == networkPresion && layout == deviceLayout) { + networkBlob = blob; + } else { + networkBlob = make_blob_with_precision({networkPresion, dims, deviceLayout}); + networkBlob->allocate(); + } + networkBlobMap[blobData.first] = networkBlob; +} + template static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision, bool isInputBlob = true) { - for (auto&& userData : userDataMap) { - const auto& dims = userData.second->getTensorDesc().getDims(); - const auto deviceLayout = TensorDesc::getLayoutByDims(dims); - const auto userPrecision = userData.second->getTensorDesc().getPrecision(); - const auto userLayout = userData.second->getTensorDesc().getLayout(); - - const auto networkPrecision = InferenceEngine::details::convertPrecision(GetNetworkPrecision(userData.first)); - Blob::Ptr userBlob = make_blob_with_precision({userPrecision, dims, userLayout}); - userBlob->allocate(); - userBlobMap[userData.first] = userBlob; - - Blob::Ptr deviceBlob; - if (userPrecision == networkPrecision && userLayout == deviceLayout) { - deviceBlob = userBlob; + for (const auto& userData : userDataMap) { + auto partialShape = userData.second->getPartialShape(); + SizeVector dims; + if (partialShape.is_static()) { + dims = userData.second->getTensorDesc().getDims(); + } else if (partialShape.rank().is_static()) { + dims = SizeVector(partialShape.rank().get_length(), 0); } else { - if (userLayout != deviceLayout && !isInputBlob) { - IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs"; - } - deviceBlob = make_blob_with_precision({networkPrecision, dims, deviceLayout}); - deviceBlob->allocate(); + dims = SizeVector{0}; } - - deviceBlobMap[userData.first] = deviceBlob; + AllocateImplSingle(userBlobMap, deviceBlobMap, userData, GetNetworkPrecision, dims); } } @@ -239,8 +256,8 @@ void 
TemplateInferRequest::inferPreprocess() { IInferRequestInternal::execDataPreprocessing(_deviceInputs); for (auto&& networkInput : _deviceInputs) { auto index = _executableNetwork->_inputIndex[networkInput.first]; - const auto& parameter = _parameters[index]; - const auto& parameterShape = parameter->get_shape(); + const auto& parameter = _executableNetwork->_function->get_parameters()[index]; + auto parameterShape = networkInput.second->getTensorDesc().getDims(); const auto& parameterType = parameter->get_element_type(); _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( parameterType, @@ -254,7 +271,11 @@ void TemplateInferRequest::inferPreprocess() { if (outputBlob->getTensorDesc().getPrecision() == networkOutput->getTensorDesc().getPrecision()) { networkOutput = outputBlob; } - const auto& result = _results[index]; + const auto& result = _executableNetwork->_function->get_results()[index]; + if (result->get_output_partial_shape(0).is_dynamic()) { + _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(); + continue; + } const auto& resultShape = result->get_shape(); const auto& resultType = result->get_element_type(); _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( @@ -287,19 +308,190 @@ void TemplateInferRequest::waitPipeline() { void TemplateInferRequest::inferPostprocess() { OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, _profilingTask[Postprocess]); auto start = Time::now(); - for (auto&& output : _outputs) { - auto outputBlob = output.second; + for (auto&& output : _networkOutputs) { + auto index = _executableNetwork->_outputIndex[output.first]; + const auto& result = _executableNetwork->_function->get_results()[index]; + if (result->get_output_partial_shape(0).is_dynamic()) { + // Touch blob to allocate it + Blob::Ptr blob; + GetBlob(output.first); + } + auto outputBlob = _outputs.at(output.first); auto networkOutput = _networkOutputBlobs[output.first]; - // perform precision conversion of network output's precision and computational - // graph output's precision are different if (outputBlob->getTensorDesc().getPrecision() != networkOutput->getTensorDesc().getPrecision()) { blobCopy(networkOutput, outputBlob); + } else if (result->get_output_partial_shape(0).is_dynamic()) { + auto tensor = _outputTensors[_executableNetwork->_outputIndex.at(output.first)]; + tensor->read(InferenceEngine::as(outputBlob)->wmap().as(), + tensor->get_size_in_bytes()); } } _durations[Postprocess] = Time::now() - start; } // ! [infer_request:infer_postprocess] +// ! [infer_request:get_blob] +InferenceEngine::Blob::Ptr TemplateInferRequest::GetBlob(const std::string& name) { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "GetBlob"); + InputInfo::Ptr foundInput; + DataPtr foundOutput; + Blob::Ptr data; + const SizeVector oneVector = {1}; + if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) { + // ROI blob is returned only if it was set previously. Otherwise default blob is returned. + auto it = _preProcData.find(name); + if (it != _preProcData.end()) { + data = it->second->getRoiBlob(); + } else { + data = _inputs[name]; + SizeVector dims; + if (!data) { + auto&& parameters = _executableNetwork->_function->get_parameters(); + const auto& pshape = parameters.at(_executableNetwork->_inputIndex.at(name))->get_partial_shape(); + dims = pshape.is_dynamic() ? 
SizeVector({0}) : pshape.get_shape(); + AllocateImplSingle( + _inputs, + _deviceInputs, + *_networkInputs.find(name), + [&](const std::string& blobName) { + return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type(); + }, + dims); + data = _inputs[name]; + } else { + dims = data->getTensorDesc().getDims(); + } + checkBlob(data, name, true, foundInput->getTensorDesc().getLayout() != SCALAR ? dims : oneVector); + auto& devBlob = _deviceInputs[name]; + if (preProcessingRequired(foundInput, data, devBlob)) { + // if no devBlob, performs inplace + addInputPreProcessingFor(name, data, devBlob ? devBlob : _inputs[name]); + } + } + } else { + data = _outputs[name]; + SizeVector dims; + if (!foundOutput->isDynamic()) { + dims = foundOutput->getTensorDesc().getDims(); + } else if (_outputTensors[_executableNetwork->_outputIndex.at(name)]->get_partial_shape().is_static()) { + dims = _outputTensors[_executableNetwork->_outputIndex.at(name)]->get_shape(); + } else { + IE_THROW() << "Output blob dimensions are not all known for output name " << name + << " with partial shape: " << foundOutput->getPartialShape(); + } + + if (data) { + if (data->getTensorDesc().getDims() != dims) { + // TODO: implement something smart here instead of raw re-allocation + data.reset(); + } + } + + if (!data) { + auto&& results = _executableNetwork->_function->get_results(); + AllocateImplSingle( + _outputs, + _networkOutputBlobs, + *_networkOutputs.find(name), + [&](const std::string& blobName) { + return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type(); + }, + dims); + data = _outputs[name]; + } + checkBlob(data, name, false, foundOutput->getTensorDesc().getLayout() != SCALAR ? dims : oneVector); + } + return data; +} +// ! [infer_request:get_blob] + +// ! [infer_request:set_blob] +void TemplateInferRequest::SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr& userBlob) { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "SetBlob"); + if (name.empty()) { + IE_THROW(NotFound) << "Failed to set blob with empty name"; + } + if (!userBlob) + IE_THROW(NotAllocated) << "Failed to set empty blob with name: \'" << name << "\'"; + const bool compoundBlobPassed = userBlob->is(); + const bool remoteBlobPassed = userBlob->is(); + if (!compoundBlobPassed && !remoteBlobPassed && userBlob->buffer() == nullptr) + IE_THROW(NotAllocated) << "Input data was not allocated. Input name: \'" << name << "\'"; + if (userBlob->size() == 0) { + IE_THROW() << "Input data is empty. 
Input name: \'" << name << "\'"; + } + + InputInfo::Ptr foundInput; + DataPtr foundOutput; + size_t dataSize = userBlob->size(); + if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) { + // ilavreno: the condition below is obsolete, but we need an exact list of precisions + // which are supports by G-API preprocessing + if (foundInput->getPrecision() != userBlob->getTensorDesc().getPrecision()) { + IE_THROW(ParameterMismatch) + << "Failed to set Blob with precision not corresponding to user input precision"; + } + + auto& devBlob = _deviceInputs[name]; + auto usrDims = userBlob->getTensorDesc().getDims(); + auto usrLayout = userBlob->getTensorDesc().getLayout(); + auto devDims = devBlob->getTensorDesc().getDims(); + auto devLayout = devBlob->getTensorDesc().getLayout(); + auto devPrecision = devBlob->getTensorDesc().getPrecision(); + if (foundInput->getInputData()->isDynamic() && (devDims != usrDims || devLayout != usrLayout)) { + devBlob = make_blob_with_precision({devPrecision, usrDims, TensorDesc::getLayoutByDims(usrDims)}); + devBlob->allocate(); + _deviceInputs[name] = devBlob; + } + const bool preProcRequired = preProcessingRequired(foundInput, userBlob, devBlob); + if (compoundBlobPassed && !preProcRequired) { + IE_THROW(NotImplemented) << "cannot set compound blob: supported only for input pre-processing"; + } + + if (preProcRequired) { + addInputPreProcessingFor(name, userBlob, devBlob ? devBlob : _inputs[name]); + } else { + size_t inputSize = devBlob->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR + ? InferenceEngine::details::product(devBlob->getTensorDesc().getDims()) + : 1; + if (dataSize != inputSize) { + IE_THROW() << "Input blob size is not equal network input size (" << dataSize << "!=" << inputSize + << ")."; + } + _inputs[name] = userBlob; + devBlob = userBlob; + } + } else { + if (compoundBlobPassed) { + IE_THROW(NotImplemented) << "cannot set compound blob: supported only for input pre-processing"; + } + auto& devBlob = _networkOutputBlobs[name]; + auto usrDims = userBlob->getTensorDesc().getDims(); + auto usrLayout = userBlob->getTensorDesc().getLayout(); + auto devDims = devBlob->getTensorDesc().getDims(); + auto devLayout = devBlob->getTensorDesc().getLayout(); + auto devPrecision = devBlob->getTensorDesc().getPrecision(); + if (foundOutput->isDynamic() && (devDims != usrDims || devLayout != usrLayout)) { + devBlob = make_blob_with_precision({devPrecision, usrDims, TensorDesc::getLayoutByDims(usrDims)}); + devBlob->allocate(); + _networkOutputBlobs[name] = devBlob; + } + size_t outputSize = devBlob->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR + ? details::product(devBlob->getTensorDesc().getDims()) + : 1; + if (dataSize != outputSize) { + IE_THROW() << "Output blob size is not equal network output size (" << dataSize << "!=" << outputSize + << ")."; + } + if (foundOutput->getPrecision() != userBlob->getTensorDesc().getPrecision()) { + IE_THROW(ParameterMismatch) + << "Failed to set Blob with precision not corresponding to user output precision"; + } + _outputs[name] = userBlob; + } +} +// ! [infer_request:set_blob] + // ! 
[infer_request:get_performance_counts] std::map TemplateInferRequest::GetPerformanceCounts() const { std::map perfMap; diff --git a/docs/template_plugin/src/template_infer_request.hpp b/docs/template_plugin/src/template_infer_request.hpp index 0e1b904ccdb75f..f7e9a4f9952df5 100644 --- a/docs/template_plugin/src/template_infer_request.hpp +++ b/docs/template_plugin/src/template_infer_request.hpp @@ -40,6 +40,9 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal { void waitPipeline(); void inferPostprocess(); + InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override; + void SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr& userBlob) override; + private: void allocateDeviceBuffers(); void allocateBlobs(); @@ -52,8 +55,6 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal { std::array, numOfStages> _durations; InferenceEngine::BlobMap _networkOutputBlobs; - ngraph::ParameterVector _parameters; - ngraph::ResultVector _results; std::vector> _inputTensors; std::vector> _outputTensors; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_dynamic.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_dynamic.cpp new file mode 100644 index 00000000000000..221fe0851581e3 --- /dev/null +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_dynamic.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request_dynamic.hpp" + +using namespace BehaviorTestsDefinitions; + +namespace { + +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 +}; + +const std::vector> configs = { + {} +}; + +INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestDynamicTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), + ::testing::ValuesIn(configs)), + InferRequestDynamicTests::getTestCaseName); + +} // namespace + diff --git a/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h b/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h index 42288ba84ac31f..25d527b7919a97 100644 --- a/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h +++ b/inference-engine/src/inference_engine/include/ie/cpp/ie_cnn_network.h @@ -182,8 +182,20 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) { * @param inputShapes A map of pairs: name of corresponding data and its dimension. */ void reshape(const ICNNNetwork::InputShapes& inputShapes); + + /** + * @brief Run shape inference with new input shapes for the network + * @param inputShapes A map of pairs: name of corresponding data and its dimension. + */ + void reshape(const std::initializer_list& inputShapes); IE_SUPPRESS_DEPRECATED_END + /** + * @brief Run shape inference with new input partial shapes for the network + * @param inputShapes A map of pairs: name of corresponding data and its dimension. + */ + void reshape(const std::map& inputShapes); + /** * @brief Serialize network to IR and weights files. 
* diff --git a/inference-engine/src/inference_engine/include/ie/ie_blob.h b/inference-engine/src/inference_engine/include/ie/ie_blob.h index b25d180747fe57..21342ae73da010 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_blob.h +++ b/inference-engine/src/inference_engine/include/ie/ie_blob.h @@ -187,6 +187,28 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { */ virtual bool deallocate() noexcept = 0; + /** + * @brief Set new shape for blob, deallocate/allocate if new total size is bigger than previous one. + * + * @param dims new shape + */ + void setShape(const SizeVector& dims) { + if (properProduct(dims) > properProduct(getTensorDesc().getDims())) { + // New blob shape requires more memory than old one -- reallocate + if (!deallocate()) + IE_THROW() << "Cannot deallocate blob while an attempt to enlarge blob area in setShape."; + + // Old and new ranks should match as well as layouts + getTensorDesc().setDims(dims); + + allocate(); + // no way to detect if allocation is successful other than map/unmap that we wouldn't like to do here + } else { + // Don't shrink area when new size fit the existing area + getTensorDesc().setDims(dims); + } + } + /** * @deprecated Cast to MemoryBlob and use new wlock/rwlock API instead. * Blob class can represent compound blob, which do not refer to the only solid memory. @@ -239,6 +261,17 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { return std::accumulate(std::begin(dims), std::end(dims), (size_t)1, std::multiplies()); } + /** + * @deprecated Cast to MemoryBlob and use its API instead. + * @brief Multiplies the dimension vector values. Size of a scalar is 1 instead of 0 as for product. + * + * @param dims Reference to a vector with dimension values of type size_t + * @return Result of multiplication + */ + static size_t properProduct(const SizeVector& dims) noexcept { + return std::accumulate(std::begin(dims), std::end(dims), (size_t)1, std::multiplies()); + } + /** * @brief Gets an allocator for allocator-based blobs * diff --git a/inference-engine/src/inference_engine/include/ie/ie_data.h b/inference-engine/src/inference_engine/include/ie/ie_data.h index 86a3e937adff67..95776398a8d79c 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_data.h +++ b/inference-engine/src/inference_engine/include/ie/ie_data.h @@ -11,6 +11,7 @@ #include #include +#include #include #include @@ -38,6 +39,16 @@ class INFERENCE_ENGINE_API_CLASS(Data) { */ Data(const std::string& name, Precision _precision, Layout layout = NCHW); + /** + * @brief A constructor with partial shape + * + * @param name Name of the data node + * @param _precision Precision of the data + * @param shape Partial shape of the data + * @param layout Data layout + */ + Data(const std::string& name, Precision _precision, const ngraph::PartialShape& shape, Layout layout = BLOCKED); + /** * @brief A constructor with tensor descriptor * @@ -91,6 +102,20 @@ class INFERENCE_ENGINE_API_CLASS(Data) { * @param layout new layout */ void reshape(const SizeVector& dims, Layout layout); + /** + * @brief changes dims and layout at same time + * + * @param dims new dimensions + * @param layout new layout + */ + void reshape(const std::initializer_list& dims, Layout layout); + /** + * @brief changes dims and layout at same time + * + * @param dims new dimensions + * @param layout new layout + */ + void reshape(const ngraph::PartialShape& dims, Layout layout); /** * @brief Gets the layout value for this Data instance @@ -142,6 +167,18 @@ class INFERENCE_ENGINE_API_CLASS(Data) { */ const UserValue& 
getUserObject() const; + /** + * @brief Checks if current data has dynamic shapes + * @return true if data has dynamic shapes + */ + bool isDynamic() const; + + /** + * @brief Returns partial shapes + * @return shapes which can have dynamic dimensions + */ + const ngraph::PartialShape& getPartialShape() const; + /** * @private * @brief Don't touch this field. An implementation details for Data object. @@ -163,5 +200,7 @@ class INFERENCE_ENGINE_API_CLASS(Data) { * @brief A tensor descriptor */ mutable TensorDesc tensorDesc; + + ngraph::PartialShape pShape; }; } // namespace InferenceEngine diff --git a/inference-engine/src/inference_engine/include/ie/ie_icnn_network.hpp b/inference-engine/src/inference_engine/include/ie/ie_icnn_network.hpp index acedb48bb2ce94..30327c14fec879 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_icnn_network.hpp +++ b/inference-engine/src/inference_engine/include/ie/ie_icnn_network.hpp @@ -183,6 +183,22 @@ class INFERENCE_ENGINE_API_CLASS(ICNNNetwork) : public std::enable_shared_from_t return NOT_IMPLEMENTED; }; + /** + * @deprecated Use InferenceEngine::CNNNetwork wrapper instead + * @brief Run shape inference with new input shapes for the network + * + * @param partialShapes - map of pairs: name of corresponding data and its dimension. + * @param resp Pointer to the response message that holds a description of an error if any occurred + * @return Status code of the operation + */ + INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead") + virtual StatusCode reshape(const std::map& partialShapes, + ResponseDesc* resp) noexcept { + (void)partialShapes; + (void)resp; + return NOT_IMPLEMENTED; + }; + /** * @deprecated Use InferenceEngine::CNNNetwork wrapper instead * @brief Serialize network to IR and weights files. diff --git a/inference-engine/src/inference_engine/include/ie/ie_input_info.hpp b/inference-engine/src/inference_engine/include/ie/ie_input_info.hpp index e844bf7b8442ef..ef2ee6ced12274 100644 --- a/inference-engine/src/inference_engine/include/ie/ie_input_info.hpp +++ b/inference-engine/src/inference_engine/include/ie/ie_input_info.hpp @@ -138,6 +138,17 @@ class InputInfo { return _inputData->getTensorDesc(); } + /** + * @brief Returns the input shape. May have undefined dimensions. + * @return PartialShape object describing input shape. + */ + ngraph::PartialShape getPartialShape() { + if (!_inputData) { + IE_THROW() << "Data is empty!"; + } + return _inputData->getPartialShape(); + } + /** * @brief Gets pre-process info for the input * @return A reference to the PreProcessInfo instance that contains pre-process info for this input diff --git a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp index 9e3171d3a1012d..8ad6e3c4e74d59 100644 --- a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.cpp @@ -65,24 +65,21 @@ void CNNNetworkNGraphImpl::createDataForResult(const ::ngraph::Output<::ngraph:: return false; } }; - // query shape from ngraph::Parameter output shape and check there are no zeros in it - SizeVector dims; - if (output.get_partial_shape().is_static()) { - dims = output.get_shape(); - } - for (const auto& dim : dims) { - if (!dim) + auto shape = output.get_partial_shape(); + auto rank = shape.rank().is_static() ? 
shape.rank().get_length() : 0; + for (const auto& dim : shape) { + if (dim.is_static() && dim.get_length() == 0) IE_THROW() << outName << " has zero dimension which is not allowed"; } if (ptr) { const auto origLayout = ptr->getTensorDesc().getLayout(); - const auto layout = isCompatible(dims.size(), origLayout) ? origLayout : TensorDesc::getLayoutByDims(dims); - ptr->reshape(dims, layout); + const auto layout = isCompatible(rank, origLayout) ? origLayout : TensorDesc::getLayoutByRank(rank); + ptr->reshape(shape, layout); } else { - const auto layout = TensorDesc::getLayoutByDims(dims); + const auto layout = TensorDesc::getLayoutByRank(rank); const auto precision = details::convertPrecision(output.get_element_type()); - ptr.reset(new Data(outName, {precision, dims, layout})); + ptr.reset(new Data(outName, precision, shape, layout)); } } @@ -180,12 +177,15 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) { for (const auto& inputInfo : inputs) { InputInfo::Ptr info = std::make_shared(); const auto& name = inputInfo.second->getInputData()->getName(); - DataPtr input = std::make_shared(name, inputInfo.second->getInputData()->getTensorDesc()); + const auto& inData = inputInfo.second->getInputData(); + DataPtr input = + std::make_shared(name, inData->getPrecision(), inData->getPartialShape(), inData->getLayout()); _data[name] = input; info->setInputData(input); info->getPreProcess() = inputInfo.second->getPreProcess(); info->setPrecision(inputInfo.second->getPrecision()); - info->setLayout(inputInfo.second->getLayout()); + if (!inData->isDynamic()) + info->setLayout(inputInfo.second->getLayout()); _inputData[name] = info; } } @@ -296,9 +296,9 @@ size_t CNNNetworkNGraphImpl::getBatchSize() const noexcept { }); for (const auto& param : params) { - if (param->get_partial_shape().rank().is_dynamic()) + if (param->get_output_partial_shape(0).rank().is_dynamic()) continue; - auto pshape = param->get_partial_shape(); + auto pshape = param->get_output_partial_shape(0); auto rank = pshape.rank().get_length(); // WA: for speech recognition and scalar layouts (copy-past from CNNNetwork) if ((rank == 2 || rank > 3) && pshape[0].is_static()) { @@ -312,7 +312,7 @@ void CNNNetworkNGraphImpl::reshape() { reshape({}); } -StatusCode CNNNetworkNGraphImpl::reshape(const std::map>& inputShapes, +StatusCode CNNNetworkNGraphImpl::reshape(const std::map& inputShapes, ResponseDesc* responseDesc) noexcept { if (inputShapes.empty()) return OK; @@ -326,7 +326,7 @@ StatusCode CNNNetworkNGraphImpl::reshape(const std::mapget_partial_shape().is_dynamic() || param->get_shape() != it->second) { + if (param->get_output_partial_shape(0).is_dynamic() || param->get_output_partial_shape(0) != it->second) { needReshape = true; break; } @@ -338,7 +338,7 @@ StatusCode CNNNetworkNGraphImpl::reshape(const std::map originalInputShapes; for (const auto& param : params) { - originalInputShapes[param->get_friendly_name()] = param->get_partial_shape(); + originalInputShapes[param->get_friendly_name()] = param->get_output_partial_shape(0); } try { @@ -359,6 +359,14 @@ StatusCode CNNNetworkNGraphImpl::reshape(const std::map>& inputShapes, + ResponseDesc* responseDesc) noexcept { + std::map shapes; + for (const auto& shape : inputShapes) + shapes[shape.first] = ngraph::PartialShape(shape.second); + return reshape(shapes, responseDesc); +} + void CNNNetworkNGraphImpl::reshape(const std::map& inputShapes) { OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "CNNNetworkNGraphImpl::reshape"); @@ -565,7 +573,7 @@ StatusCode 
CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* respons if (i) ss << ", "; ss << "\"" << original_parameters[i]->get_friendly_name() - << "\": " << original_parameters[i]->get_partial_shape(); + << "\": " << original_parameters[i]->get_output_partial_shape(0); } // ill-formed logic from the past setBatchSize (we keep it for backward-compatibility) @@ -575,7 +583,7 @@ StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* respons [](std::shared_ptr lhs, std::shared_ptr rhs) { return lhs->get_friendly_name() < rhs->get_friendly_name(); }); - const auto first_parameter_pshape = first_parameter->get_partial_shape(); + const auto first_parameter_pshape = first_parameter->get_output_partial_shape(0); if (first_parameter_pshape.is_dynamic()) return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) << "Cannot set batch! Function contains parameter with partially defined shape!" << ss.str(); @@ -587,7 +595,7 @@ StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* respons std::map> inShapes; for (const auto& parameter : original_parameters) { - const auto& pshape = parameter->get_partial_shape(); + const auto& pshape = parameter->get_output_partial_shape(0); if (pshape.is_dynamic()) return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) << "Cannot set batch! Function contains parameter with partially defined shape!" << ss.str(); diff --git a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp index 6a7027327f3ccc..856ab398764146 100644 --- a/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp +++ b/inference-engine/src/inference_engine/src/cnn_network_ngraph_impl.hpp @@ -74,6 +74,8 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetworkNGraphImpl) final : public ICNNNetwor StatusCode reshape(const std::map>& inputShapes, ResponseDesc* resp) noexcept override; + StatusCode reshape(const std::map& inputShapes, + ResponseDesc* resp) noexcept override; StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept override; diff --git a/inference-engine/src/inference_engine/src/cpp/ie_cnn_network.cpp b/inference-engine/src/inference_engine/src/cpp/ie_cnn_network.cpp index 0bc58df4880d0d..0e994bad97ada2 100644 --- a/inference-engine/src/inference_engine/src/cpp/ie_cnn_network.cpp +++ b/inference-engine/src/inference_engine/src/cpp/ie_cnn_network.cpp @@ -127,6 +127,14 @@ void CNNNetwork::reshape(const ICNNNetwork::InputShapes& inputShapes) { CALL_STATUS_FNC(reshape, inputShapes); } +void CNNNetwork::reshape(const std::initializer_list& inputShapes) { + return reshape(ICNNNetwork::InputShapes(inputShapes)); +} + +void CNNNetwork::reshape(const std::map& inputShapes) { + CALL_STATUS_FNC(reshape, inputShapes); +} + void CNNNetwork::serialize(const std::string& xmlPath, const std::string& binPath) const { CALL_STATUS_FNC(serialize, xmlPath, binPath); } diff --git a/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp b/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp index 9dcde916f1c299..c6c1db7c0242a3 100644 --- a/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp +++ b/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iinfer_request_internal.cpp @@ -126,11 +126,8 @@ Blob::Ptr IInferRequestInternal::GetBlob(const std::string& name) { data = 
it->second->getRoiBlob(); } else { data = _inputs[name]; - checkBlob( - data, - name, - true, - foundInput->getTensorDesc().getLayout() != SCALAR ? foundInput->getTensorDesc().getDims() : oneVector); + const auto& dims = foundInput->getTensorDesc().getDims(); + checkBlob(data, name, true, foundInput->getTensorDesc().getLayout() != SCALAR ? dims : oneVector); auto& devBlob = _deviceInputs[name]; if (preProcessingRequired(foundInput, data, devBlob)) { @@ -140,11 +137,8 @@ Blob::Ptr IInferRequestInternal::GetBlob(const std::string& name) { } } else { data = _outputs[name]; - checkBlob( - data, - name, - false, - foundOutput->getTensorDesc().getLayout() != SCALAR ? foundOutput->getTensorDesc().getDims() : oneVector); + const auto& dims = foundOutput->getTensorDesc().getDims(); + checkBlob(data, name, false, foundOutput->getTensorDesc().getLayout() != SCALAR ? dims : oneVector); } return data; } @@ -252,6 +246,7 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, IE_THROW(NotAllocated) << strNotAllocated; } size_t refSize; + bool isDynamic = false; if (refDims.empty()) { SizeVector dims; if (isInput) { @@ -263,6 +258,7 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, if (foundInputPair == std::end(_networkInputs)) { IE_THROW(NotFound) << "Failed to find input with name: \'" << name << "\'"; } + isDynamic = foundInputPair->second->getInputData()->getPartialShape().is_dynamic(); dims = foundInputPair->second->getTensorDesc().getDims(); refSize = foundInputPair->second->getTensorDesc().getLayout() != SCALAR ? details::product(dims) : 1; } else { @@ -274,14 +270,22 @@ void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, if (foundOutputPair == std::end(_networkOutputs)) { IE_THROW(NotFound) << "Failed to find output with name: \'" << name << "\'"; } - dims = foundOutputPair->second->getTensorDesc().getDims(); + isDynamic = foundOutputPair->second->getPartialShape().is_dynamic(); + ngraph::PartialShape blobPartialShape(blob->getTensorDesc().getDims()); + if (foundOutputPair->second->getPartialShape().compatible(blobPartialShape)) { + dims = blob->getTensorDesc().getDims(); + } else { + // TODO: it is strange to request tensor desc from data when the shapes are not compatible, probably we + // need to immediately throw here + dims = foundOutputPair->second->getTensorDesc().getDims(); + } refSize = foundOutputPair->second->getTensorDesc().getLayout() != SCALAR ? 
details::product(dims) : 1; } } else { refSize = details::product(refDims); } - if (refSize != blob->size()) { + if (!isDynamic && refSize != blob->size()) { IE_THROW() << strNotMatched + ": got " << blob->size() << " expecting " << refSize; } const bool remoteBlobPassed = blob->is(); diff --git a/inference-engine/src/inference_engine/src/ie_data.cpp b/inference-engine/src/inference_engine/src/ie_data.cpp index 24a7b8b020d5b9..aa89b34d9f23c3 100644 --- a/inference-engine/src/inference_engine/src/ie_data.cpp +++ b/inference-engine/src/inference_engine/src/ie_data.cpp @@ -77,7 +77,22 @@ Data::Data(const std::string& name, Precision _precision, Layout layout) _impl = std::make_shared(); } -Data::Data(const std::string& name, const TensorDesc& desc) : name(name), userObject({0}), tensorDesc(desc) { +Data::Data(const std::string& name, const TensorDesc& desc) + : name(name), + userObject({0}), + tensorDesc(desc), + pShape(desc.getDims()) { + _impl = std::make_shared(); +} + +Data::Data(const std::string& name, Precision _precision, const ngraph::PartialShape& shape, Layout layout) + : name(name), + userObject({0}), + tensorDesc(_precision, layout), + pShape(shape) { + if (pShape.is_static()) { + tensorDesc.reshape(pShape.to_shape(), tensorDesc.getLayout()); + } _impl = std::make_shared(); } @@ -95,6 +110,15 @@ bool Data::isInitialized() const { void Data::setDims(const SizeVector& a_dims) { tensorDesc.setDims(a_dims); + pShape = ngraph::PartialShape(a_dims); +} + +bool Data::isDynamic() const { + return tensorDesc.getDims().empty() && tensorDesc.getLayout() != SCALAR && pShape.is_dynamic(); +} + +const ngraph::PartialShape& Data::getPartialShape() const { + return pShape; } void Data::setLayout(Layout layout) { @@ -103,9 +127,27 @@ void Data::setLayout(Layout layout) { void Data::reshape(const SizeVector& a_dims, Layout a_layout) { tensorDesc.reshape(a_dims, a_layout); + pShape = ngraph::PartialShape(a_dims); +} + +void Data::reshape(const std::initializer_list& dims, Layout layout) { + reshape(SizeVector(dims), layout); } -Data::Data(const Data& data) : name(data.name), userObject(data.userObject), tensorDesc(data.tensorDesc) { +void Data::reshape(const ngraph::PartialShape& dims, Layout layout) { + if (dims.is_static()) { + reshape(SizeVector(dims.to_shape()), layout); + } else { + tensorDesc = TensorDesc(tensorDesc.getPrecision(), layout); + pShape = dims; + } +} + +Data::Data(const Data& data) + : name(data.name), + userObject(data.userObject), + tensorDesc(data.tensorDesc), + pShape(data.pShape) { _impl = std::make_shared(); _impl->creatorLayer = data._impl->creatorLayer; _impl->inputTo = data._impl->inputTo; @@ -116,6 +158,7 @@ Data& Data::operator=(const Data& data) { name = data.name; userObject = data.userObject; tensorDesc = data.tensorDesc; + pShape = data.pShape; _impl->creatorLayer = data._impl->creatorLayer; _impl->inputTo = data._impl->inputTo; @@ -145,6 +188,11 @@ void Data::setPrecision(const Precision& precision) { } const SizeVector& Data::getDims() const { + if (isDynamic()) + IE_THROW() << "Cannot return dims for Data with dynamic shapes!"; + if (tensorDesc.getDims().empty() && tensorDesc.getLayout() != SCALAR) { + tensorDesc.setDims(pShape.to_shape()); + } return tensorDesc.getDims(); } diff --git a/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_simd_impl.hpp b/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_simd_impl.hpp index 19b00e41f3103b..f6ba9d3afbc136 100644 --- 
a/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_simd_impl.hpp +++ b/inference-engine/src/preprocessing/ie_preprocess_gapi_kernels_simd_impl.hpp @@ -156,7 +156,6 @@ CV_ALWAYS_INLINE void splitRowC3_Impl(const T in[], T out0[], #if MANUAL_SIMD constexpr int nlanes = VecT::nlanes; - GAPI_DbgAssert(length >= nlanes); VecT r0, r1, r2; for (; length >= nlanes;) { diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp index 27ecca826fafed..5192f1d2289e8f 100644 --- a/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp +++ b/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp @@ -266,7 +266,7 @@ void MyriadInferRequest::GetResult() { "Can not find tensor descriptor by plugin for {} output", ieBlobName); const auto& dynOutputDesc = descFromPlugin->second; - if (ieBlob->getTensorDesc().getLayout() != dynOutputDesc.getLayout()) { + if (ieBlob->getTensorDesc().getDims() != dynOutputDesc.getDims()) { ieBlob->deallocate(); ieBlob->getTensorDesc().reshape(dynOutputDesc.getDims(), dynOutputDesc.getLayout()); ieBlob->allocate(); diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp index 8ddefc82d603fb..5c8885d7cf7414 100644 --- a/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp @@ -70,13 +70,15 @@ TEST_F(NGraphReshapeTests, ReshapedDynamicShapeLayout) { } CNNNetwork cnnNetwork(ngraph); - ASSERT_EQ(Layout::SCALAR, cnnNetwork.getInputsInfo()["A"]->getLayout()); + ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["A"]->getLayout()); + ASSERT_TRUE(cnnNetwork.getInputsInfo()["A"]->getInputData()->isDynamic()); ICNNNetwork::InputShapes new_shape; new_shape["A"] = ngraph::Shape{1, 3, 22, 22}; cnnNetwork.reshape(new_shape); ASSERT_EQ(Layout::NCHW, cnnNetwork.getInputsInfo()["A"]->getLayout()); + ASSERT_FALSE(cnnNetwork.getInputsInfo()["A"]->getInputData()->isDynamic()); } TEST_F(NGraphReshapeTests, ReshapeBatchReLU) { @@ -236,6 +238,120 @@ TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLUWithoutCloneFunction) { ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 25, 25})); } +TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLUStaticToDynamic) { + const ngraph::PartialShape refShape{1, 3, ngraph::Dimension::dynamic(), 25}; + std::shared_ptr ngraph; + { + ngraph::PartialShape shape({1, 3, 22, 22}); + ngraph::element::Type type(ngraph::element::Type_t::f32); + auto param = std::make_shared(type, shape); + param->set_friendly_name("data"); + auto relu = std::make_shared(param); + auto result = std::make_shared(relu); + + ngraph::ParameterVector params = {param}; + ngraph::ResultVector results = {result}; + + ngraph = std::make_shared(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); + + CNNNetwork cnnNetwork(ngraph); + std::map shapes; + shapes["data"] = refShape; + + ASSERT_NO_THROW(cnnNetwork.reshape(shapes)); + + auto changedFunction = cnnNetwork.getFunction(); + ASSERT_NE(nullptr, changedFunction); + ASSERT_TRUE(changedFunction->get_parameters()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_TRUE(changedFunction->get_results()[0]->get_output_partial_shape(0).is_dynamic()); + 
ASSERT_TRUE(ngraph->get_parameters()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_TRUE(ngraph->get_results()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_EQ(changedFunction->get_parameters()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(changedFunction->get_results()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(ngraph->get_parameters()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(ngraph->get_results()[0]->get_output_partial_shape(0), refShape); +} + +TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLUStaticToFullyDynamic) { + const ngraph::PartialShape refShape = ngraph::PartialShape::dynamic(); + std::shared_ptr ngraph; + { + ngraph::PartialShape shape({1, 3, 22, 22}); + ngraph::element::Type type(ngraph::element::Type_t::f32); + auto param = std::make_shared(type, shape); + param->set_friendly_name("data"); + auto relu = std::make_shared(param); + auto result = std::make_shared(relu); + + ngraph::ParameterVector params = {param}; + ngraph::ResultVector results = {result}; + + ngraph = std::make_shared(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); + ASSERT_EQ(ngraph->get_results()[0]->get_shape(), ngraph::Shape({1, 3, 22, 22})); + + CNNNetwork cnnNetwork(ngraph); + std::map shapes; + shapes["data"] = refShape; + + ASSERT_NO_THROW(cnnNetwork.reshape(shapes)); + + auto changedFunction = cnnNetwork.getFunction(); + ASSERT_NE(nullptr, changedFunction); + ASSERT_TRUE(changedFunction->get_parameters()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_TRUE(changedFunction->get_results()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_TRUE(ngraph->get_parameters()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_TRUE(ngraph->get_results()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_EQ(changedFunction->get_parameters()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(changedFunction->get_results()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(ngraph->get_parameters()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(ngraph->get_results()[0]->get_output_partial_shape(0), refShape); +} + +TEST_F(NGraphReshapeTests, CNNReshapeSpatialReLUDynamicToDynamic) { + const ngraph::PartialShape refShape{1, 3, ngraph::Dimension::dynamic(), 25}; + std::shared_ptr ngraph; + { + ngraph::PartialShape shape({1, 3, 22, ngraph::Dimension::dynamic()}); + ngraph::element::Type type(ngraph::element::Type_t::f32); + auto param = std::make_shared(type, shape); + param->set_friendly_name("data"); + auto relu = std::make_shared(param); + auto result = std::make_shared(relu); + + ngraph::ParameterVector params = {param}; + ngraph::ResultVector results = {result}; + + ngraph = std::make_shared(results, params); + } + + ASSERT_EQ(ngraph->get_parameters()[0]->get_output_partial_shape(0), ngraph::PartialShape({1, 3, 22, ngraph::Dimension::dynamic()})); + ASSERT_EQ(ngraph->get_results()[0]->get_output_partial_shape(0), ngraph::PartialShape({1, 3, 22, ngraph::Dimension::dynamic()})); + + CNNNetwork cnnNetwork(ngraph); + std::map shapes; + shapes["data"] = refShape; + + ASSERT_NO_THROW(cnnNetwork.reshape(shapes)); + + auto changedFunction = cnnNetwork.getFunction(); + ASSERT_NE(nullptr, changedFunction); + ASSERT_TRUE(changedFunction->get_parameters()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_TRUE(changedFunction->get_results()[0]->get_output_partial_shape(0).is_dynamic()); + 
ASSERT_TRUE(ngraph->get_parameters()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_TRUE(ngraph->get_results()[0]->get_output_partial_shape(0).is_dynamic()); + ASSERT_EQ(changedFunction->get_parameters()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(changedFunction->get_results()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(ngraph->get_parameters()[0]->get_output_partial_shape(0), refShape); + ASSERT_EQ(ngraph->get_results()[0]->get_output_partial_shape(0), refShape); +} + class CustomTestOp: public ngraph::op::Op { public: static constexpr ngraph::NodeTypeInfo type_info{"CustomTestLayer", 0}; diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp new file mode 100644 index 00000000000000..66af6d6397c91f --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp @@ -0,0 +1,656 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include "ie_extension.h" +#include +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "ngraph_functions/builders.hpp" +#include +#include +#include +#include +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "ngraph_functions/subgraph_builders.hpp" +#include "shared_test_classes/subgraph/basic_lstm.hpp" + +namespace BehaviorTestsDefinitions { +using InferRequestTests = BehaviorTestsUtils::BehaviorTestsBasic; + +// Setting empty config to LoadNetwork doesn't throw +TEST_P(InferRequestTests, SetEmptyConfig) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + InferenceEngine::ExecutableNetwork execNet; + std::map config {}; + if (targetDevice.find(CommonTestUtils::DEVICE_AUTO) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos && + targetDevice.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) { + ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); + ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, config)); + } else { + ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice)); + ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration)); + } +} + +// Load correct network to Plugin to get executable network +TEST_P(InferRequestTests, canLoadCorrectNetworkToGetExecutable) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + InferenceEngine::ExecutableNetwork execNet; + ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration)); +} + +TEST_P(InferRequestTests, CanCreateTwoExeNetworks) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + InferenceEngine::ExecutableNetwork execNet; + for (auto i = 0; i < 2; i++) { + ASSERT_NO_THROW(execNet = 
ie->LoadNetwork(cnnNet, targetDevice, configuration)); + ASSERT_NE(nullptr, cnnNet.getFunction()); + } +} + +TEST_P(InferRequestTests, CanCreateInferRequest) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); +} + +TEST_P(InferRequestTests, failToSetNullptrForInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr inputBlob = nullptr; + ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, inputBlob), + InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, failToSetEmptyInputBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob; + ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob), + InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, failToSetEmptyOutputBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob; + ASSERT_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob), + InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, failToSetNotAllocatedInput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); +} + +TEST_P(InferRequestTests, failToSetNotAllocatedOutput) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = 
ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); +} + +TEST_P(InferRequestTests, failToSetBlobWithIncorrectName) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + const char incorrect_input_name[] = "incorrect_input_name"; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + blob->allocate(); + ASSERT_THROW(req.SetBlob(incorrect_input_name, blob), + InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, failToSetInputWithIncorrectSizes) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + blob->allocate(); + blob->getTensorDesc().getDims()[0]*=2; + ASSERT_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob), + InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, failToSetOutputWithIncorrectSizes) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + blob->allocate(); + blob->getTensorDesc().getDims()[0]*=2; + ASSERT_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob), + InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, canInferWithoutSetAndGetInOut) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + 
InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(req.Infer()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetBlobForAsync) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(req.Infer()); + ASSERT_NO_THROW(req.StartAsync()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterGetAndSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); + ASSERT_NO_THROW(req.Infer()); + ASSERT_NO_THROW(req.StartAsync()); +} + +TEST_P(InferRequestTests, canProcessDeallocatedInputBlobAfterSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getInputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::Exception); +} + 
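As context for the behavior tests in this header and the InferRequestDynamicTests instantiation added for the TEMPLATE device, the following is a minimal usage sketch of the flow the new API surface in this patch (CNNNetwork::reshape taking ngraph::PartialShape, InputInfo::getPartialShape, Blob::setShape, and the template plugin's GetBlob/SetBlob overrides) is intended to enable. It is illustrative only and not taken from the patch: the trivial ReLU model, the parameter name "data", the concrete shapes, and the assumption that the template plugin is registered under the device name "TEMPLATE" are all hypothetical.

#include <map>
#include <string>

#include <ie_core.hpp>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>

int main() {
    using namespace InferenceEngine;

    // Build a trivial ReLU function with a static input shape, as the reshape tests do.
    auto param = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32,
                                                             ngraph::Shape{1, 3, 22, 22});
    param->set_friendly_name("data");
    auto relu = std::make_shared<ngraph::opset3::Relu>(param);
    auto result = std::make_shared<ngraph::opset3::Result>(relu);
    auto function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                       ngraph::ParameterVector{param});
    CNNNetwork network(function);

    // New overload added by this patch: reshape to a partially dynamic input shape.
    std::map<std::string, ngraph::PartialShape> shapes;
    shapes["data"] = ngraph::PartialShape{1, 3, ngraph::Dimension::dynamic(), 22};
    network.reshape(shapes);

    // The input now reports a partial shape; Data::getDims() throws for dynamic data.
    ngraph::PartialShape pshape = network.getInputsInfo().at("data")->getPartialShape();
    (void)pshape;

    // Assumes the template plugin is registered as "TEMPLATE" (CommonTestUtils::DEVICE_TEMPLATE).
    Core core;
    auto executable = core.LoadNetwork(network, "TEMPLATE");
    auto request = executable.CreateInferRequest();

    // Supply a blob with a concrete shape at request time; the plugin's SetBlob override
    // reallocates its device blob when the user dimensions differ from the current ones.
    Blob::Ptr input = make_shared_blob<float>({Precision::FP32, {1, 3, 10, 22}, Layout::NCHW});
    input->allocate();
    request.SetBlob("data", input);

    request.Infer();

    // For dynamic results the output blob is allocated once the actual shape is known,
    // so GetBlob after Infer returns a blob with resolved dimensions.
    Blob::Ptr output = request.GetBlob(network.getOutputsInfo().begin()->first);
    (void)output;
    return 0;
}

The patch also adds Blob::setShape, which reallocates only when the new total element count exceeds the current one (the properProduct comparison in ie_blob.h above); shrinking keeps the existing allocation. A short sketch of that behavior:

Blob::Ptr b = make_shared_blob<float>({Precision::FP32, {1, 3, 4, 4}, Layout::NCHW});
b->allocate();
b->setShape({1, 3, 2, 2});   // fits into the existing allocation: dims updated in place
b->setShape({1, 3, 8, 8});   // larger than current dims: deallocate() and allocate() are called internally
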
+TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetBlobForAsync) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::Exception); + ASSERT_THROW({ req.StartAsync(); req.Wait(); }, InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterGetAndSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, canProcessDeallocatedOutputBlobAfterSetBlob) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = + FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc()); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getOutputsInfo().begin()->first, blob)); + blob->deallocate(); + ASSERT_THROW(req.Infer(), InferenceEngine::Exception); +} + +TEST_P(InferRequestTests, secondCallGetOutputDoNotReAllocateData) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob1; + InferenceEngine::Blob::Ptr blob2; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob1 = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob2 = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_EQ(blob1.get(), blob2.get()); +} + +TEST_P(InferRequestTests, CorrectOneAsyncInferWithGetInOutWithInfWait) { + // Skip test according to plugin specific disabledTestPatterns() (if any) 
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    InferenceEngine::Blob::Ptr blob;
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
+    req.Infer();
+    req.StartAsync();
+    InferenceEngine::StatusCode sts;
+    sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
+    ASSERT_EQ(InferenceEngine::StatusCode::OK, sts);
+    ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first));
+}
+
+// Plugin must handle a correct infer request when the input and result BlobMaps are allocated inside the plugin
+TEST_P(InferRequestTests, canStartAsyncInferWithGetInOutWithStatusOnlyWait) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    InferenceEngine::Blob::Ptr blob =
+        FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
+    req.Infer();
+    req.StartAsync();
+    InferenceEngine::StatusCode sts;
+    sts = req.Wait(InferenceEngine::InferRequest::WaitMode::STATUS_ONLY);
+    ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK ||
+                sts == InferenceEngine::StatusCode::RESULT_NOT_READY);
+}
+
+// Plugin must handle a correct infer request when the input and result BlobMaps are allocated inside the plugin
+TEST_P(InferRequestTests, FailedAsyncInferWithNegativeTimeForWait) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    InferenceEngine::Blob::Ptr blob =
+        FuncTestUtils::createAndFillBlob(cnnNet.getOutputsInfo().begin()->second->getTensorDesc());
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
+    req.Infer();
+    req.StartAsync();
+    ASSERT_THROW(req.Wait(-2), InferenceEngine::Exception);
+}
+
+TEST_P(InferRequestTests, canRun3SyncRequestsConsistentlyFromThreads) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    auto req1 = execNet.CreateInferRequest();
+    auto req2 = execNet.CreateInferRequest();
+    auto req3 = execNet.CreateInferRequest();
+
+
+    auto f1 = std::async(std::launch::async, [&] { req1.Infer();});
+    auto f2 = std::async(std::launch::async, [&] { req2.Infer();});
+    auto f3 = std::async(std::launch::async, [&] { req3.Infer();});
+
+    ASSERT_NO_THROW(f1.get());
+    ASSERT_NO_THROW(f2.get());
+    ASSERT_NO_THROW(f3.get());
+}
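+
+// The following tests repeat the multi-request scenario, now also exercising StartAsync() and Wait()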
+
+TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyWithWait) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    auto req1 = execNet.CreateInferRequest();
+    auto req2 = execNet.CreateInferRequest();
+    auto req3 = execNet.CreateInferRequest();
+
+    req1.StartAsync();
+    ASSERT_NO_THROW(req1.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY));
+
+    req2.Infer();
+    ASSERT_NO_THROW(req2.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY));
+
+    req3.Infer();
+    ASSERT_NO_THROW(req3.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY));
+}
+
+TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    auto req1 = execNet.CreateInferRequest();
+    auto req2 = execNet.CreateInferRequest();
+    auto req3 = execNet.CreateInferRequest();
+    InferenceEngine::ResponseDesc response1, response2, response3;
+    InferenceEngine::StatusCode sts1, sts2, sts3;
+
+    req1.Infer();
+    req2.Infer();
+    req3.Infer();
+
+    std::thread t1([&] { req1.StartAsync(); sts1 = req1.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); });
+    std::thread t2([&] { req2.StartAsync(); sts2 = req2.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); });
+    std::thread t3([&] { req3.StartAsync(); sts3 = req3.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); });
+
+    t1.join();
+    t2.join();
+    t3.join();
+
+    ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts1) << response1.msg;
+    ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts2) << response2.msg;
+    ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts3) << response3.msg;
+}
+
+TEST_P(InferRequestTests, canWaitWithoutStartAsync) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    auto req = execNet.CreateInferRequest();
+    ASSERT_NO_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY));
+    ASSERT_NO_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::STATUS_ONLY));
+    ASSERT_NO_THROW(req.Wait(1));
+}
+
+TEST_P(InferRequestTests, returnDeviceBusyOnSetBlobAfterAsyncInfer) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    auto&& config = configuration;
+    auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS));
+    if (itConfig != config.end()) {
+        if (itConfig->second != "CPU_THROUGHPUT_AUTO") {
+            if (std::stoi(itConfig->second) == 0) {
+                GTEST_SKIP() << "Not applicable with disabled streams";
+            }
+        }
+    }
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    auto req = execNet.CreateInferRequest();
+    auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
+    InferenceEngine::ResponseDesc response;
+
+    InferenceEngine::StatusCode sts;
+    sts = req.Wait(InferenceEngine::InferRequest::WaitMode::STATUS_ONLY);
+    ASSERT_EQ(InferenceEngine::StatusCode::INFER_NOT_STARTED, sts) << response.msg;
+    req.StartAsync();
+    sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
+    ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts) << response.msg;
+    try {
+        req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob);
+    }
+    catch (const std::exception &e) {
+        std::cout << "Exception: " << e.what() << std::endl;
+    }
+    sts = req.Wait(InferenceEngine::InferRequest::WaitMode::STATUS_ONLY);
+    ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK ||
+                sts == InferenceEngine::StatusCode::RESULT_NOT_READY) << response.msg;
+}
+
+TEST_P(InferRequestTests, returnDeviceBusyOnGetBlobAfterAsyncInfer) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    auto req = execNet.CreateInferRequest();
+    auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
+    InferenceEngine::ResponseDesc response;
+    InferenceEngine::StatusCode sts;
+    req.StartAsync();
+    sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
+    ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts) << response.msg;
+    try {
+        req.SetBlob(cnnNet.getInputsInfo().begin()->first, outputBlob);
+    }
+    catch (const std::exception &e) {
+        std::cout << "Exception: " << e.what() << std::endl;
+    }
+}
+
+TEST_P(InferRequestTests, returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    auto req = execNet.CreateInferRequest();
+    auto outputBlob = req.GetBlob(cnnNet.getInputsInfo().begin()->first);
+    InferenceEngine::ResponseDesc response;
+    InferenceEngine::StatusCode sts;
+    req.StartAsync();
+    sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
+    ASSERT_EQ(static_cast<int>(InferenceEngine::StatusCode::OK), sts) << response.msg;
+
+    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+
+    try {
+        perfMap = req.GetPerformanceCounts();
+    }
+    catch (const std::exception &e) {
+        std::cout << "Exception: " << e.what() << std::endl;
+    }
+}
+
+class InferRequestTestsResultNotReady : public InferRequestTests {
+};
+
+TEST_P(InferRequestTestsResultNotReady, ReturnResultNotReadyFromWaitInAsyncModeForTooSmallTimeout) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from the ngraph::Function returned by Basic_LSTM_S::GetNetwork
+    // GetNetwork(3000, 380) makes inference take around 20ms on GNA SW,
+    // which increases the chances of getting RESULT_NOT_READY
+    function = SubgraphTestsDefinitions::Basic_LSTM_S::GetNetwork(300, 38);
+    InferenceEngine::CNNNetwork cnnNet(function);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    InferenceEngine::StatusCode sts = InferenceEngine::StatusCode::OK;
+    std::promise<std::chrono::system_clock::time_point> callbackTimeStamp;
+    auto callbackTimeStampFuture = callbackTimeStamp.get_future();
+    // add a callback to the request and capture the timestamp
+    req.SetCompletionCallback([&]() {
+        callbackTimeStamp.set_value(std::chrono::system_clock::now());
+    });
+    req.StartAsync();
+    ASSERT_NO_THROW(sts = req.Wait(InferenceEngine::InferRequest::WaitMode::STATUS_ONLY));
+    // get the timestamp taken AFTER the return from Wait(STATUS_ONLY)
+    const auto afterWaitTimeStamp = std::chrono::system_clock::now();
+    // if the callback timestamp is later than afterWaitTimeStamp,
+    // then we should observe RESULT_NOT_READY
+    if (afterWaitTimeStamp < callbackTimeStampFuture.get()) {
+        ASSERT_TRUE(sts == InferenceEngine::StatusCode::RESULT_NOT_READY);
+    }
+    ASSERT_NO_THROW(req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY));
+}
+}  // namespace BehaviorTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_dynamic.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_dynamic.hpp
new file mode 100644
index 00000000000000..52116bd19e6d50
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_dynamic.hpp
@@ -0,0 +1,320 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "ie_extension.h"
+#include
+#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ngraph_functions/builders.hpp"
+#include
+#include
+#include
+#include
+#include
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+#include "shared_test_classes/subgraph/basic_lstm.hpp"
+
+
+namespace BehaviorTestsDefinitions {
+
+class InferRequestDynamicTests : public BehaviorTestsUtils::BehaviorTestsBasic {
+public:
+    void SetUp() override {
+        std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
+        function = ngraph::builder::subgraph::makeSplitConvConcat();
+    }
+};
+
+TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithoutSetShape) {
+    const std::string param_name = "Param_1";
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+    std::map<std::string, ngraph::PartialShape> shapes;
+    shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20};
+    cnnNet.reshape(shapes);
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    InferenceEngine::Blob::Ptr blob;
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first));
+}
+
+TEST_P(InferRequestDynamicTests, InferDynamicNetworkBoundWithoutSetShape) {
+    const std::string param_name = "Param_1";
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    // Create CNNNetwork from ngraph::Function
+    InferenceEngine::CNNNetwork cnnNet(function);
+
std::map shapes; + shapes[param_name] = {ngraph::Dimension(0, 5), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); +} + + +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetBlob) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + //ASSERT_NO_THROW(req.SetShape(param_name, {1, 4, 20, 20})); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob->setShape({1, 4, 20, 20})); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); + req.Infer(); + req.StartAsync(); + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); +} + +TEST_P(InferRequestDynamicTests, InferUpperBoundNetworkWithGetBlob) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension(0, 19), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + //ASSERT_NO_THROW(req.SetShape(param_name, {1, 4, 20, 20})); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob->setShape({1, 4, 20, 20})); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); + req.Infer(); + req.StartAsync(); + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); +} + +TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetBlobLower) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + // Skip test according to plugin 
specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension(2, 3), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob->setShape({1, 4, 20, 20})); + // Plugin may or may not throw in case if input tensor has dimensions that are out of bounds + //ASSERT_THROW(req.Infer(), InferenceEngine::Exception); +} + +TEST_P(InferRequestDynamicTests, InferOutOfRangeShapeNetworkWithGetBlobUpper) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension(1, 2), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob->setShape({3, 4, 20, 20})); + // Plugin may or may not throw in case if input tensor has dimensions that are out of bounds + // ASSERT_THROW(req.Infer(), InferenceEngine::Exception); +} + +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetBlob2times) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refShape2 = {2, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + const InferenceEngine::SizeVector refOutShape2 = {2, 10, 18, 18}; + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob->setShape(refShape)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); + req.Infer(); + req.StartAsync(); + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); + + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob->setShape(refShape2)); + 
ASSERT_EQ(blob->getTensorDesc().getDims(), refShape2); + req.Infer(); + req.StartAsync(); + sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape2); +} + + +TEST_P(InferRequestDynamicTests, GetSameBlob2times) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_NO_THROW(blob->setShape(refShape)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getInputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); +} + +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetBlob) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = make_blob_with_precision({InferenceEngine::Precision::FP32, refShape, InferenceEngine::Layout::NCHW}); + blob->allocate(); + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); + req.Infer(); + req.StartAsync(); + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); +} + +TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetBlob2times) { + const std::string param_name = "Param_1"; + const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refShape2 = {2, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + const InferenceEngine::SizeVector refOutShape2 = {2, 10, 18, 18}; + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + // Create CNNNetwork from ngrpah::Function + InferenceEngine::CNNNetwork cnnNet(function); + std::map shapes; + shapes[param_name] = {ngraph::Dimension::dynamic(), 4, 20, 20}; + cnnNet.reshape(shapes); + // Load CNNNetwork to 
target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + // Create InferRequest + InferenceEngine::InferRequest req; + InferenceEngine::Blob::Ptr blob = make_blob_with_precision({InferenceEngine::Precision::FP32, refShape, InferenceEngine::Layout::NCHW}); + blob->allocate(); + + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape); + req.Infer(); + req.StartAsync(); + InferenceEngine::StatusCode sts; + sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape); + + blob = make_blob_with_precision({InferenceEngine::Precision::FP32, refShape2, InferenceEngine::Layout::NCHW}); + blob->allocate(); + ASSERT_NO_THROW(req.SetBlob(cnnNet.getInputsInfo().begin()->first, blob)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refShape2); + req.Infer(); + req.StartAsync(); + sts = req.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY); + ASSERT_EQ(InferenceEngine::StatusCode::OK, sts); + ASSERT_NO_THROW(blob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first)); + ASSERT_EQ(blob->getTensorDesc().getDims(), refOutShape2); +} + +} // namespace BehaviorTestsDefinitions diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp index b2c5b1d348a464..171bd02fc4bdd5 100644 --- a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp @@ -42,6 +42,7 @@ inline std::shared_ptr makeConvPoolRelu(std::vector in inline std::shared_ptr makeSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); + params.front()->set_friendly_name("Param_1"); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); auto conv1 = ngraph::builder::makeConvolution(split->output(0), ngPrc, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, diff --git a/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp b/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp index 0223262a56f842..d84f185727b4fa 100644 --- a/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp +++ b/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp @@ -325,6 +325,23 @@ TEST_F(BlobTests, canCreateBlobOnExistedMemory) { } +// SetShape +TEST_F(BlobTests, canSetShape) { + auto b = InferenceEngine::make_shared_blob( + InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {1, 2, 3}, InferenceEngine::ANY)); + b->allocate(); + + ASSERT_NO_THROW(b->setShape({4, 5, 6})); + + auto newDims = b->getTensorDesc().getDims(); + ASSERT_EQ(newDims.size(), 3); + ASSERT_EQ(newDims[0], 4); + ASSERT_EQ(newDims[1], 5); + ASSERT_EQ(newDims[2], 6); +} + + + TEST_F(BlobTests, canModifyDataInRangedFor) { InferenceEngine::SizeVector v = {1, 2, 3}; InferenceEngine::TBlob blob({ InferenceEngine::Precision::I32, v, InferenceEngine::CHW }); diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index 17bfa8723eae10..c6f4a9dd352cdf 100644 --- 
a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -508,11 +508,7 @@ const ov::PartialShape& ov::Node::get_output_partial_shape(size_t i) const { } const ngraph::Shape& ov::Node::get_shape() const { - if (get_output_size() != 1) { - stringstream es; - es << "get_shape() must be called on a node with exactly one output (" << description() << ")"; - throw ngraph::ngraph_error(es); - } + NODE_VALIDATION_CHECK(this, get_output_size() == 1, "get_shape() must be called on a node with exactly one output"); return get_output_shape(0); } diff --git a/ngraph/test/runtime/interpreter/int_executable.cpp b/ngraph/test/runtime/interpreter/int_executable.cpp index 8d24cd6f088c51..07a86efc5ab2ab 100644 --- a/ngraph/test/runtime/interpreter/int_executable.cpp +++ b/ngraph/test/runtime/interpreter/int_executable.cpp @@ -17,6 +17,34 @@ using namespace ngraph; NGRAPH_SUPPRESS_DEPRECATED_START +class TemporaryOverrideOutputs +{ + std::shared_ptr node; + std::vector orig_shapes; + +public: + TemporaryOverrideOutputs(std::shared_ptr node, + const std::vector>& args) + : node(node) + { + for (size_t i = 0; i < args.size(); ++i) + { + auto output = node->get_input_source_output(i); + orig_shapes.push_back(output.get_partial_shape()); + output.get_tensor().set_partial_shape(args[i]->get_shape()); + } + } + + ~TemporaryOverrideOutputs() + { + for (size_t i = 0; i < orig_shapes.size(); ++i) + { + auto output = node->get_input_source_output(i); + output.get_tensor().set_partial_shape(orig_shapes[i]); + } + } +}; + runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr& function, bool enable_performance_collection) : m_is_compiled{true} @@ -93,6 +121,14 @@ bool runtime::interpreter::INTExecutable::call(const vectorinputs().size(); ++i) + { + outputs.push_back(op->get_input_source_output(i)); + } + auto cloned_node = op->clone_with_new_inputs(outputs); + // get op outputs from map or create vector> op_outputs; for (size_t i = 0; i < op->get_output_size(); ++i) @@ -102,7 +138,8 @@ bool runtime::interpreter::INTExecutable::call(const vector(op->output(i)); + // Use cloned_node to create HostTensor with static dimensions + host_tensor = make_shared(cloned_node->output(i)); tensor_map.insert({tensor, host_tensor}); } else @@ -136,9 +173,10 @@ bool runtime::interpreter::INTExecutable::call(const vectorevaluate(op_outputs, op_inputs)) + // Call evaluate for cloned_node with static shapes + if (!cloned_node->evaluate(op_outputs, op_inputs)) { - evaluate_node(op, op_outputs, op_inputs); + evaluate_node(cloned_node, op_outputs, op_inputs); } if (m_performance_counters_enabled) {