diff --git a/src/common/low_precision_transformations/include/itt.hpp b/src/common/low_precision_transformations/include/itt.hpp index 37e01b9cfc4162..f9388c2facc557 100644 --- a/src/common/low_precision_transformations/include/itt.hpp +++ b/src/common/low_precision_transformations/include/itt.hpp @@ -23,12 +23,12 @@ namespace domains { } // namespace itt } // namespace low_precision } // namespace pass -} // namespace ngraph +} // namespace ov /* * RUN_ON_FUNCTION_SCOPE macro allows to disable the run_on_function pass * MATCHER_SCOPE macro allows to disable the MatcherPass if matcher isn't applied - * INTERNAL_OP_SCOPE macro allows to disable parts of internal nGraph operations if they are not used + * INTERNAL_OP_SCOPE macro allows to disable parts of internal openvino operations if they are not used */ #if defined(SELECTIVE_BUILD_ANALYZER) diff --git a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp index d3c5a04d14df6d..83e486af697ff7 100644 --- a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp @@ -29,7 +29,7 @@ namespace pass { namespace low_precision { /** -* @brief NetworkHelper class encapsulates manipulations with nGraph function. +* @brief NetworkHelper class encapsulates manipulations with ov::Model. 
*/ class LP_TRANSFORMATIONS_API NetworkHelper { public: diff --git a/src/common/low_precision_transformations/src/batch_to_space.cpp b/src/common/low_precision_transformations/src/batch_to_space.cpp index b136d284ed5902..cc80f95707eb70 100644 --- a/src/common/low_precision_transformations/src/batch_to_space.cpp +++ b/src/common/low_precision_transformations/src/batch_to_space.cpp @@ -5,10 +5,9 @@ #include "low_precision/batch_to_space.hpp" #include -#include -#include -#include +#include "openvino/op/batch_to_space.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" #include "low_precision/network_helper.hpp" #include "itt.hpp" @@ -20,7 +19,7 @@ BatchToSpaceTransformation::BatchToSpaceTransformation(const Params& params) : L MATCHER_SCOPE(BatchToSpaceTransformation); auto matcher = pattern::wrap_type(); - ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) { + ov::graph_rewrite_callback callback = [this](pattern::Matcher& m) { auto op = m.get_match_root(); if (transformation_callback(op)) { return false; @@ -28,7 +27,7 @@ BatchToSpaceTransformation::BatchToSpaceTransformation(const Params& params) : L return transform(*context, m); }; - auto m = std::make_shared(matcher, matcher_name); + auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } @@ -45,7 +44,7 @@ bool BatchToSpaceTransformation::canBeTransformed(const TransformationContext& c return dequantization.isPerTensor(); } -bool BatchToSpaceTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher& m) { +bool BatchToSpaceTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { if (!canBeTransformed(context, m.get_match_root())) { return false; } diff --git a/src/common/low_precision_transformations/src/network_helper.cpp b/src/common/low_precision_transformations/src/network_helper.cpp index 29d7c9670bb362..b3925c850c5673 100644 --- a/src/common/low_precision_transformations/src/network_helper.cpp 
+++ b/src/common/low_precision_transformations/src/network_helper.cpp @@ -195,7 +195,7 @@ size_t NetworkHelper::getGroupsCount(std::shared_ptr layer) { } void NetworkHelper::removeLayer(std::shared_ptr layer) { - ngraph::replace_output_update_name(layer->output(0), layer->input_value(0)); + ov::replace_output_update_name(layer->output(0), layer->input_value(0)); } std::shared_ptr NetworkHelper::swapMultiplyAndAdd(std::shared_ptr addAfterMultiply, const int multiplyBranch) { diff --git a/src/common/low_precision_transformations/src/reshape.cpp b/src/common/low_precision_transformations/src/reshape.cpp index 487139077f5c69..0c5f83502df4e8 100644 --- a/src/common/low_precision_transformations/src/reshape.cpp +++ b/src/common/low_precision_transformations/src/reshape.cpp @@ -200,7 +200,7 @@ bool ReshapeTransformation::canBeTransformed(const TransformationContext& contex const auto inputs = op->get_output_target_inputs(0); if (inputs.size() == 1ul) { const auto consumer = inputs.begin()->get_node(); - ignorePerTensorQuantizationCheck = ngraph::as_type(consumer) != nullptr; + ignorePerTensorQuantizationCheck = ov::as_type(consumer) != nullptr; } } diff --git a/src/common/low_precision_transformations/src/space_to_batch.cpp b/src/common/low_precision_transformations/src/space_to_batch.cpp index 0c9200a2f061eb..75bf0f9dbbc559 100644 --- a/src/common/low_precision_transformations/src/space_to_batch.cpp +++ b/src/common/low_precision_transformations/src/space_to_batch.cpp @@ -5,11 +5,9 @@ #include "low_precision/space_to_batch.hpp" #include -#include -#include - -#include +#include "openvino/op/space_to_batch.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" #include "low_precision/network_helper.hpp" #include "itt.hpp" @@ -21,7 +19,7 @@ SpaceToBatchTransformation::SpaceToBatchTransformation(const Params& params) : L MATCHER_SCOPE(SpaceToBatchTransformation); auto matcher = pattern::wrap_type(); - ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) { 
+ ov::graph_rewrite_callback callback = [this](pattern::Matcher& m) { auto op = m.get_match_root(); if (transformation_callback(op)) { return false; @@ -29,7 +27,7 @@ SpaceToBatchTransformation::SpaceToBatchTransformation(const Params& params) : L return transform(*context, m); }; - auto m = std::make_shared(matcher, matcher_name); + auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } @@ -46,7 +44,7 @@ bool SpaceToBatchTransformation::canBeTransformed(const TransformationContext& c return dequantization.isPerTensor(); } -bool SpaceToBatchTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher& m) { +bool SpaceToBatchTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { if (!canBeTransformed(context, m.get_match_root())) { return false; } diff --git a/src/common/low_precision_transformations/src/transpose.cpp b/src/common/low_precision_transformations/src/transpose.cpp index e2c098103a5677..eb40b4e183abaa 100644 --- a/src/common/low_precision_transformations/src/transpose.cpp +++ b/src/common/low_precision_transformations/src/transpose.cpp @@ -116,7 +116,7 @@ bool TransposeTransformation::canBeTransformed(const TransformationContext& cont } } if (dequantization.multiply != nullptr) { - const auto mulConst = ov::as_type_ptr(dequantization.multiplyConstant); + const auto mulConst = ov::as_type_ptr(dequantization.multiplyConstant); if (!NetworkHelper::isScalarLike(mulConst)) { return false; } diff --git a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp index d3dd47d2107737..dd6995efc6e957 100644 --- a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp @@ -163,7 +163,7 @@ bool WeightableLayerTransformation::canBeTransformed(const 
TransformationContext // // [1] no other consumers for FQ sitting on weights (neither Result node, nor any others - // original code includes separate checks for node being output and other consumers present; for - // ngraph it is a single check for number of consumers). + // openvino it is a single check for number of consumers). // // [2] if weights is anything except a constant with data_type other than i8; this check is overriden by // stronger check from Convolution patter which expects FQ only on weights diff --git a/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp b/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp index 0d5bad38902cb4..9c378f33b69406 100644 --- a/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp +++ b/src/common/low_precision_transformations/tests/get_dequantization_transformation.cpp @@ -26,7 +26,7 @@ using namespace ngraph::builder::subgraph; class GetDequantizationTestValues { public: FakeQuantizeOnData fakeQuantize; - // actual dequantization to create nGraph function to run NetworkHelper::getDequantization + // actual dequantization to create ov::Model to run NetworkHelper::getDequantization DequantizationOperations actualDequantization; DequantizationOperations expectedDequantization; }; diff --git a/src/common/snippets/docs/snippets_design_guide.md b/src/common/snippets/docs/snippets_design_guide.md index 83adce2171cccc..2f157b620b9574 100644 --- a/src/common/snippets/docs/snippets_design_guide.md +++ b/src/common/snippets/docs/snippets_design_guide.md @@ -236,7 +236,7 @@ Finally, the `Backend` uses the optimized `IR` to produce executable code. As shown on the figure below, `Snippets` are organized in a very similar way. 
```mermaid graph LR - Source[nGraph \n model] + Source[OpenVINO \n model] subgraph Snippets direction LR subgraph Optimizer[Optimizer] @@ -244,10 +244,10 @@ As shown on the figure below, `Snippets` are organized in a very similar way. Data[Data flow \n optimizations] Converter[Convert \n IR] Control[Control flow \n optimizations] - Data-->|nGraph \nIR|Converter + Data-->|OpenVINO \nIR|Converter Converter-->|Linear \nIR|Control end - Frontend[Tokenizer]-->|nGraph \nIR|Data + Frontend[Tokenizer]-->|OpenVINO \nIR|Data Control-->|Linear \nIR|Backend[Generator] end Source --> Frontend @@ -258,13 +258,13 @@ classDef daisy1 fill:#FFE17A, stroke: #FEC91B, color: #262626 class Frontend,Optimizer,Backend steel1 class Source,Executable daisy1 ``` -Instead of a source code, `Snippets` take `nGraph` model as an input. -Then the `Tokenizer` (which is essentially a `Snippets` `Frontend`) parses an input `nGraph model`, and tries to find a part of the model that could be processed by `Snippets`. -If such a part is found, `Tokenizer` converts it to an `nGraph IR` and stores inside a `Subgraph` node. -`nGraph IR` - is one of the two `IR` types used by `Snippets`, it is simply a small `nGraph model` that can contain `Snippets`-specific operations. +Instead of a source code, `Snippets` take `OpenVINO` model as an input. +Then the `Tokenizer` (which is essentially a `Snippets` `Frontend`) parses an input `OpenVINO model`, and tries to find a part of the model that could be processed by `Snippets`. +If such a part is found, `Tokenizer` converts it to an `OpenVINO IR` and stores inside a `Subgraph` node. +`OpenVINO IR` - is one of the two `IR` types used by `Snippets`, it is simply a small `OpenVINO model` that can contain `Snippets`-specific operations. -`nGraph IR` is then passed to the `Optimizer` unit that in turn consists of three subunits. -The purpose of the first subunit is to perform data flow optimizations. 
The second subunit converts `nGraph IR` (data-flow-oriented representation) to `Linear IR` (control-flow-focused IR). Finally, the third subunit is dedicated to control flow optimizations. +`OpenVINO IR` is then passed to the `Optimizer` unit that in turn consists of three subunits. +The purpose of the first subunit is to perform data flow optimizations. The second subunit converts `OpenVINO IR` (data-flow-oriented representation) to `Linear IR` (control-flow-focused IR). Finally, the third subunit is dedicated to control flow optimizations. After all optimizations, the `Linear IR` is used by the `Generator` (which is `Snippets` `Backend`) to produce executable code, which we will refer to as `Kernel`. As discussed in the Introduction, the purpose of the `Kernel` is to process a part of the initial tensor, and several `Kernels` are usually executed in parallel to process the whole tensor. @@ -280,7 +280,7 @@ The `Snippets` integration into the plugin pipeline is schematically depicted be graph LR subgraph Plugin[ Plugin pipeline ] direction LR - subgraph ngraph[ Transformations on nGraph model ] + subgraph openvino[ Transformations on OpenVINO model ] direction LR common[Common \n Transformations] lpt[Low \n Precision] @@ -305,7 +305,7 @@ The `Snippets` integration into the plugin pipeline is schematically depicted be create-->execute end end - Source[nGraph \n model]-->|Main \n flow|common + Source[OpenVINO \n model]-->|Main \n flow|common convert~~~internal classDef no-bg-color fill:none,stroke-width:0px classDef steel1 fill:#B9D6E5, stroke: #86B3CA, color: #262626 @@ -315,15 +315,15 @@ class tokenize,optimize,generate steel1 class Source,Executable daisy1 class create,execute dafault_node1 ``` -As one can see from the picture, overall plugin pipeline consists of two major blocks: the first block applies transformations to `nGraph model` while the second one works with the internal plugin graph representation. 
Since `Snippets` is a backend-independent framework, it can't work with the plugin graph or plugin-specific `Ops` directly, so the tokenization is performed immediately before plugin-specific operations are introduced into the graph (`Conversion to Plugin opset`). -`Tokenizer` replaces parts of the `nGraph model` that can be executed by `Snippets` with `ov::op::Subgraph` nGraph nodes. -Each of the nodes stores a piece of the initial `nGraph model` that was replaced by the node. -This piece is stored as an nGraph model itself, which we refer to as `nGraph IR` to distinguish from the original `nGraph model`. +As one can see from the picture, overall plugin pipeline consists of two major blocks: the first block applies transformations to `OpenVINO model` while the second one works with the internal plugin graph representation. Since `Snippets` is a backend-independent framework, it can't work with the plugin graph or plugin-specific `Ops` directly, so the tokenization is performed immediately before plugin-specific operations are introduced into the graph (`Conversion to Plugin opset`). +`Tokenizer` replaces parts of the `OpenVINO model` that can be executed by `Snippets` with `ov::op::Subgraph` OpenVINO nodes. +Each of the nodes stores a piece of the initial `OpenVINO model` that was replaced by the node. +This piece is stored as an OpenVINO model itself, which we refer to as `OpenVINO IR` to distinguish from the original `OpenVINO model`. Note that sometimes the exact type of `IR` is not important in our discussion. -In such cases, we will refer to the `IR` (`nGraph` or `Linear`) as `body function`, or simply `body`. +In such cases, we will refer to the `IR` (`OpenVINO` or `Linear`) as `body function`, or simply `body`. -When the plugin finalizes all `nGraph model` transformations, the model is converted to an internal plugin graph representation. -At this point `ov::op::Subgraph` is converted to `ov::intel_cpu::node::Snippet` which still retains the `nGraph IR`. 
+When the plugin finalizes all `OpenVINO model` transformations, the model is converted to an internal plugin graph representation. +At this point `ov::op::Subgraph` is converted to `ov::intel_cpu::node::Snippet` which still retains the `OpenVINO IR`. This IR is then optimized and an executable `Kernel` is produced during the `CreateComputePrimitive` stage (`CreatePrimitive()` stage in CPU plugin). Finally, multiple copies of the produced kernel executed in parallel during the `Execute` stage. @@ -332,7 +332,7 @@ To summarize, `Snippets` workflow consists of three major blocks: `Tokenizer`, ` ### Tokenizer -`Tokenizer` is run on an `nGraph model` and its main purpose is to identify subgraphs that are suitable for code generation. +`Tokenizer` is run on an `OpenVINO model` and its main purpose is to identify subgraphs that are suitable for code generation. These subgraphs are then replaced with the `ov::op::Subgraph` node. This stage is called tokenization because the `Tokenizer` employs a greedy algorithm similar to the ones used for parsing input stream of characters into tokens. One of the distinctive features of this algorithm is its flexibility, so it can seamlessly handle arbitrary operations' patterns. @@ -371,8 +371,8 @@ The tokenization algorithm is depicted on the flowchart below. ``` Let us briefly describe the process: 1. If a Node is not supported by `Snippets`, then ignore it and proceed to the next one. -2. If a Node has no `Subgraph` parents, then replace it with `Subgraph` node and copy the initial Node to the `Subgraph's` body (which is in the `nGraph IR` form). -3. If a Node has a single `Subgraph` parent, then attach it to the `Subgraph`. It means copy the Node to the `Subgraph's` body, and remove it from the original `nGraph model`. Note that if the Node has more than one parent, corresponding parents' outputs will be connected with the updated `Subgraph` as shown on the diagram below. +2. 
If a Node has no `Subgraph` parents, then replace it with `Subgraph` node and copy the initial Node to the `Subgraph's` body (which is in the `OpenVINO IR` form). +3. If a Node has a single `Subgraph` parent, then attach it to the `Subgraph`. It means copy the Node to the `Subgraph's` body, and remove it from the original `OpenVINO model`. Note that if the Node has more than one parent, corresponding parents' outputs will be connected with the updated `Subgraph` as shown on the diagram below. 4. If a Node has multiple `Subgraph` parents, then they will be merged into a single `Subgraph` and the Node will be attached to it. ```mermaid graph LR @@ -409,7 +409,7 @@ If a `Constant` is not scalar, then it can't be tokenized since storing `Constan Please refer to the [collapse_subgraph.cpp](../src/pass/collapse_subgraph.cpp) to gain more insights on the tokenization process. There is however one more aspect of the tokenization process that is worth covering here. -As discussed in the **Plugin integration** section above, the `Tokenizer` is executed before the plugin converts the `nGraph model` to an internal graph representation. +As discussed in the **Plugin integration** section above, the `Tokenizer` is executed before the plugin converts the `OpenVINO model` to an internal graph representation. It means that the tokenized nodes will not be visible to the plugin (since they are hidden inside `Subrgaphs'` body functions), so they will be ignored by plugin optimization passes. In particular, the plugin won't be able to fuse the nodes using the OneDNN post-ops mechanism. This type of fusings is backend-specific, therefore can't be supported by `Snippets` directly, but it's still important from the performance perspective. 
@@ -424,15 +424,15 @@ Please, refer to the [snippets_mark_skipped.cpp](../../../plugins/intel_cpu/src/ As briefly discussed in the ***Architecture*** section, `Optimizer` consists of two major units: the first one performs data flow optimization, and the second one is focused on control flow. Note however that some data-flow-related passes can be performed only after the control flow optimizations, so the second unit modifies the dataflow as well. Nevertheless, we will refer to the units as `Data flow optimizer` and `Control flow optimizer` to reflect their main purpose. -Keep in mind that, as discussed above, the `Data flow optimizer` operates exclusively on the `nGraph IR`, while the `Control flow optimizer` works with the `Linear IR`. +Keep in mind that, as discussed above, the `Data flow optimizer` operates exclusively on the `OpenVINO IR`, while the `Control flow optimizer` works with the `Linear IR`. We will discuss these units in more detail below. #### Data flow optimizer Before `Data flow optimizer` can modify data flow, it needs to perform a preliminary stage called `Canonicalization`. To understand the stage's purpose we need to make a step back to the tokenization. - The `Tokenizer` saves a part of the initial `nGraph function` in `Subgraph's` body. - The problem is that the `nGraph function` has no information about data layouts that will be used by the `Subgraph's` parents during the `Execution` stage. + The `Tokenizer` saves a part of the initial `OpenVINO function` in `Subgraph's` body. + The problem is that the `OpenVINO function` has no information about data layouts that will be used by the `Subgraph's` parents during the `Execution` stage. This happens because the plugin assigns layouts on internal graph representation well after the tokenization is finished. The purpose of `Canonicalization` is to incorporate the plugin-defined input layouts into the body function. 
If an input's layout was changed to a blocked one, then the corresponding body input `Parameter` will be reshaped, and new shapes will be propagated through the body function. @@ -485,17 +485,17 @@ The managers will be executed on different stages of the pipeline to enable more #### Control flow optimizer As follows from its name, the main objective of `Control flow optimizer` is to manage and optimize control flow of the kernel. -Since the `nGraph IR` doesn't have an explicit control flow representation, a special control-flow-oriented `IR` was developed. +Since the `OpenVINO IR` doesn't have an explicit control flow representation, a special control-flow-oriented `IR` was developed. It is called `Linear IR` (or simply `LIR`), let's discuss it first, before we consider the transformation pipeline. ##### Linear Intermediate Representation `Linear IR` is specially designed to facilitate manipulations with control flow. -It is called linear, because it is essentially a sequence of `Expressions` (an analog of nGraph `Op`) that represents control flow. +It is called linear, because it is essentially a sequence of `Expressions` (an analog of OpenVINO `Op`) that represents control flow. So if `Expression 1` is followed by `Expression 2` in `LIR` then the code for `Expression 1` will be emitted before the code for `Expression 2`. Note that this doesn't necessarily mean that the `Expression 2` uses the result of `Expression 1`, they can be completely unrelated from the data flow standpoint. The only restriction here is that all the `Expression's` inputs must be ready by the time it is executed. -This restriction is the same as in `nGraph IR`, but an important distinction here is that `LIR` allows to permute `Expressions` while this data-dependency condition is fulfilled. +This restriction is the same as in `OpenVINO IR`, but an important distinction here is that `LIR` allows to permute `Expressions` while this data-dependency condition is fulfilled. 
So the `LIR` preserves data dependencies, but also allows for a more control on expressions' order that represents control flow. This is a brief rationale behind the linear `IR`, now let's move to the implementation. @@ -536,13 +536,13 @@ flowchart LR class consumers no-bg ``` -`LinearIR` is our graph representation, it's an analog to an nGraph model. +`LinearIR` is our graph representation, it's an analog to an OpenVINO model. It is simply a container for `Expressions`, the order of `Expressions` represents control flow. -`LIR` also incorporates a range of useful methods to manage the `Expressions`, for example `create_expression(...)` to build `Expressions` from nGraph nodes, or `replace_input(...)` to modify data dependencies between `Expressions`. +`LIR` also incorporates a range of useful methods to manage the `Expressions`, for example `create_expression(...)` to build `Expressions` from OpenVINO nodes, or `replace_input(...)` to modify data dependencies between `Expressions`. Please refer to the implementation in [linear_ir.cpp](../src/lowered/linear_ir.cpp) for more details. `Expression` is the main building block of a `Linear IR`. -It contains a pointer to the nGraph node it was created from and a pointer to the emitter it will be mapped to (which is null until `Expression::init_emitter(...)` is called). +It contains a pointer to the OpenVINO node it was created from and a pointer to the emitter it will be mapped to (which is null until `Expression::init_emitter(...)` is called). An `Expression` can have an arbitrary number of inputs and outputs, we will refer to them simply as ports. Every port can be uniquely identified by the `ExpressionPort` class. The `ExpressionPort` contains a pointer to the `Expression` which port it represents, the port type (`input` or `output`) and its index (input/output number). 
@@ -556,7 +556,7 @@ This information will be used by the control flow optimization pipeline to deter An `Expression` internally stores two separate vectors of input and output `PortDescriptors` which could be accessed by calling `get_input_port_descriptors()` or `get_input_port_descriptor(i)` (and similar for outputs). Finally, `PortConnectors` specify how the `Expression's` ports are connected. -Note that an `Expression` output can be connected to several inputs (like with nGraph nodes), So every `PortConnector` stores one source `ExpressionPort` and a set of consumer `ExpressionPorts` that can be accessed by the `get_source()` or `get_consumers()` methods, respectively. +Note that an `Expression` output can be connected to several inputs (like with OpenVINO nodes), So every `PortConnector` stores one source `ExpressionPort` and a set of consumer `ExpressionPorts` that can be accessed by the `get_source()` or `get_consumers()` methods, respectively. Like with `PortDescriptors`, an `Expression` stores input and output `PortConnectors` in two separate vectors accessed via `get_input_port_connector(i)` (or its output twin). An example on how `PortConnectors` can be used to move between `Expressions` is given on the right side of the above picture. @@ -622,7 +622,7 @@ Please see [assign_registers.cpp](../src/lowered/pass/assign_registers.cpp) and When the `Preparation` is finished, the `Generator` constructs target-specific emitters by calling `init_emitter(target)` method for every `Expression` in the `LinearIR`, where the `target` is a `TargetMachine` instance. The `TargetMachine` is a class that provides generator with target-specific information, such as supported instruction sets, vector register size etc. -`TargetMachine` also maps the nGraph's `DiscreteTypeInfo` (stored in the `Expression`) to the emitter that actually implements the operation. 
+`TargetMachine` also maps OpenVINO's `DiscreteTypeInfo` (stored in the `Expression`) to the emitter that actually implements the operation. The mapping is done using the `jitters` map defined in [target_machine.hpp](../include/snippets/target_machine.hpp). In order for this mechanism to work, every `Snippets'` code generation backend should create emitter implementations derived from the `Emitter` base class defined in [emitter.hpp](../include/snippets/emitter.hpp). The backend then should create its own target machine class (derived from the common `TargetMachine`) and populate the `jitters` map, see the [cpu_generator.cpp](../../../plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp) for an implementation example. diff --git a/src/common/snippets/include/snippets/itt.hpp b/src/common/snippets/include/snippets/itt.hpp index 0c594165ab5776..4a617f5a06e645 100644 --- a/src/common/snippets/include/snippets/itt.hpp +++ b/src/common/snippets/include/snippets/itt.hpp @@ -9,7 +9,7 @@ #pragma once -#include +#include namespace ov { namespace pass { @@ -26,7 +26,7 @@ OV_CC_DOMAINS(internal_op); /* * RUN_ON_FUNCTION_SCOPE macro allows to disable the run_on_function pass * MATCHER_SCOPE macro allows to disable the MatcherPass if matcher isn't applied - * INTERNAL_OP_SCOPE macro allows to disable parts of internal nGraph operations if they are not used + * INTERNAL_OP_SCOPE macro allows to disable parts of internal OpenVINO operations if they are not used */ #if defined(SELECTIVE_BUILD_ANALYZER) diff --git a/src/common/snippets/include/snippets/op/loop.hpp b/src/common/snippets/include/snippets/op/loop.hpp index fefc1368bb4307..1fd51649fc65d1 100644 --- a/src/common/snippets/include/snippets/op/loop.hpp +++ b/src/common/snippets/include/snippets/op/loop.hpp @@ -6,7 +6,7 @@ #include "openvino/op/op.hpp" #include "snippets/emitter.hpp" -#include "ngraph/op/parameter.hpp" +#include "openvino/op/parameter.hpp" namespace ov { namespace snippets { diff --git 
a/src/common/snippets/include/snippets/op/powerstatic.hpp b/src/common/snippets/include/snippets/op/powerstatic.hpp index 5a1d0abb23ffb4..d76fa48e0601aa 100644 --- a/src/common/snippets/include/snippets/op/powerstatic.hpp +++ b/src/common/snippets/include/snippets/op/powerstatic.hpp @@ -5,7 +5,6 @@ #pragma once #include "openvino/op/op.hpp" -#include #include namespace ov { diff --git a/src/common/snippets/include/snippets/op/scalar.hpp b/src/common/snippets/include/snippets/op/scalar.hpp index 43ecb1aad671cc..2720ffdc062091 100644 --- a/src/common/snippets/include/snippets/op/scalar.hpp +++ b/src/common/snippets/include/snippets/op/scalar.hpp @@ -5,7 +5,7 @@ #pragma once #include "openvino/op/op.hpp" -#include "ngraph/op/constant.hpp" +#include "openvino/op/constant.hpp" namespace ov { namespace snippets { diff --git a/src/common/snippets/include/snippets/op/subgraph.hpp b/src/common/snippets/include/snippets/op/subgraph.hpp index dab2de53e56d47..a9321e957e273c 100644 --- a/src/common/snippets/include/snippets/op/subgraph.hpp +++ b/src/common/snippets/include/snippets/op/subgraph.hpp @@ -190,10 +190,10 @@ class Subgraph : public ov::op::util::SubGraphOp { std::shared_ptr m_shape_infer = nullptr; - class NgraphShapeInfer : public ShapeInferSnippetsNode { - std::shared_ptr m_ngraph_body; + class OVShapeInfer : public ShapeInferSnippetsNode { + std::shared_ptr m_ov_body; public: - explicit NgraphShapeInfer(const std::shared_ptr& body); + explicit OVShapeInfer(const std::shared_ptr& body); Result infer(const std::vector& input_shapes) override; }; }; diff --git a/src/common/snippets/include/snippets/pass/propagate_precision.hpp b/src/common/snippets/include/snippets/pass/propagate_precision.hpp index 1f5bd0cf9542bf..6f805cb1b68808 100644 --- a/src/common/snippets/include/snippets/pass/propagate_precision.hpp +++ b/src/common/snippets/include/snippets/pass/propagate_precision.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include "openvino/pass/pass.hpp" 
#include "snippets/generator.hpp" namespace ov { diff --git a/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp b/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp index af7d29f8e3f3c3..9066d571cbb4e6 100644 --- a/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp +++ b/src/common/snippets/include/snippets/shape_inference/shape_inference.hpp @@ -38,7 +38,7 @@ class IShapeInferSnippets { }; /** - * Shape inference class for Subgraph node (both nGraph and Linear IRs). + * Shape inference class for Subgraph node (both openvino and Linear IRs). * It stores the result of the last shape inference, so it can be reused in optimization pipeline. * */ diff --git a/src/common/snippets/src/lowered/expression_factory.cpp b/src/common/snippets/src/lowered/expression_factory.cpp index 34651fd6dbbbd2..cd5cfe0db74c53 100644 --- a/src/common/snippets/src/lowered/expression_factory.cpp +++ b/src/common/snippets/src/lowered/expression_factory.cpp @@ -69,7 +69,7 @@ ExpressionPtr LinearIR::ExpressionFactory::create(const std::shared_ptr(new IOExpression(res, model->get_result_index(res), linear_ir.m_shape_infer_factory)); create_expression_inputs(linear_ir, expr); - // The Result node don't need output port (because of sense of the node). But each node in ngraph must have one output at least. + // The Result node don't need output port (because of sense of the node). But each node in openvino must have one output at least. // The port descriptors are automatically created in constructor. We manually clean output ports. 
expr->m_output_port_descriptors.clear(); expr->validate(); @@ -110,7 +110,7 @@ ExpressionPtr LinearIR::ExpressionFactory::create(const std::shared_ptr(last_input.get_expr()->get_node()), "LoopEnd expression expects LoopBegin on last input"); expr->m_input_port_descriptors[inputs.size() - 1] = last_input.get_descriptor_ptr()->clone(); init_expression_inputs(expr, inputs); - // The LoopEnd node don't need output port (because of sense of the node). But each node in ngraph must have one output at least. + // The LoopEnd node doesn't need an output port (because of the sense of the node). But each node in OpenVINO must have at least one output. // The port descriptors are automatically created in constructor. We manually clean output ports. expr->m_output_port_descriptors.clear(); expr->validate(); diff --git a/src/common/snippets/src/lowered/pass/identify_buffers.cpp b/src/common/snippets/src/lowered/pass/identify_buffers.cpp index 02aabc93ead6ac..d411da67af38d6 100644 --- a/src/common/snippets/src/lowered/pass/identify_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/identify_buffers.cpp @@ -36,7 +36,7 @@ std::vector IdentifyBuffers::create_adjacency_matrix(const LinearIR& linea auto get_buffer_idx = [&](const std::shared_ptr& buffer) { const auto iter = std::find(buffers.cbegin(), buffers.cend(), buffer); - NGRAPH_CHECK(iter != buffers.cend(), "Buffer wasn't find in Buffer system of Subgraph"); + OPENVINO_ASSERT(iter != buffers.cend(), "Buffer wasn't find in Buffer system of Subgraph"); return std::distance(buffers.cbegin(), iter); }; diff --git a/src/common/snippets/src/op/brgemm.cpp b/src/common/snippets/src/op/brgemm.cpp index b64a4328a83b1c..5cce5d85c13a82 100644 --- a/src/common/snippets/src/op/brgemm.cpp +++ b/src/common/snippets/src/op/brgemm.cpp @@ -127,7 +127,7 @@ ov::PartialShape Brgemm::get_planar_output_shape(const ov::PartialShape& output_ } ov::PartialShape Brgemm::get_output_partial_shape(const std::vector& input_shapes) const { -
NGRAPH_CHECK(input_shapes.size() == 2, "BRGEMM expects 2 input shapes for shape inference"); + OPENVINO_ASSERT(input_shapes.size() == 2, "BRGEMM expects 2 input shapes for shape inference"); // Note: All majors checks are missed because Brgemm is transformed from MatMul with whole shape infer support diff --git a/src/common/snippets/src/op/fill.cpp b/src/common/snippets/src/op/fill.cpp index 437f594cdfc519..05f79495ae1748 100644 --- a/src/common/snippets/src/op/fill.cpp +++ b/src/common/snippets/src/op/fill.cpp @@ -32,7 +32,7 @@ std::shared_ptr Fill::clone_with_new_inputs(const OutputVector& new_args) void Fill::validate_and_infer_types() { INTERNAL_OP_SCOPE(Fill_validate_and_infer_types); const auto in_type = get_input_element_type(0); - NGRAPH_CHECK(in_type.size() == 4, "Fill operation supports only element types with 4 byte size but got:" + std::to_string(in_type.size())); + OPENVINO_ASSERT(in_type.size() == 4, "Fill operation supports only element types with 4 byte size but got:" + std::to_string(in_type.size())); set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } diff --git a/src/common/snippets/src/op/load.cpp b/src/common/snippets/src/op/load.cpp index d1a7d0f2cb523e..868ed4294e6dab 100644 --- a/src/common/snippets/src/op/load.cpp +++ b/src/common/snippets/src/op/load.cpp @@ -40,13 +40,13 @@ std::shared_ptr Load::clone_with_new_inputs(const OutputVector& new_args) LoadReshape::LoadReshape(const Output& x, const size_t count, const size_t offset, std::vector order) : Load(x, count, offset), m_order(std::move(order)) { const auto& in_shape = x.get_partial_shape(); - NGRAPH_CHECK(in_shape.is_static(), "LoadReshape supports only static input shapes"); + OPENVINO_ASSERT(in_shape.is_static(), "LoadReshape supports only static input shapes"); const auto in_shape_size = in_shape.size(); - NGRAPH_CHECK(m_order.size() == in_shape_size, "LoadReshape got new_order of invalid size"); - NGRAPH_CHECK(*std::max_element(m_order.begin(), 
m_order.end()) == in_shape_size - 1 && + OPENVINO_ASSERT(m_order.size() == in_shape_size, "LoadReshape got new_order of invalid size"); + OPENVINO_ASSERT(*std::max_element(m_order.begin(), m_order.end()) == in_shape_size - 1 && *std::min_element(m_order.begin(), m_order.end()) == 0, "LoadReshape detected invalid values in new_order"); const std::set unique_dims(order.begin(), order.end()); - NGRAPH_CHECK(unique_dims.size() == order.size(), "LoadReshape order must not contain repeated elements"); + OPENVINO_ASSERT(unique_dims.size() == order.size(), "LoadReshape order must not contain repeated elements"); constructor_validate_and_infer_types(); } diff --git a/src/common/snippets/src/op/memory_access.cpp b/src/common/snippets/src/op/memory_access.cpp index 117c1bd14e2e7f..f98d72be7f94f5 100644 --- a/src/common/snippets/src/op/memory_access.cpp +++ b/src/common/snippets/src/op/memory_access.cpp @@ -73,25 +73,25 @@ bool MemoryAccess::is_memory_access_output_port(size_t idx) const { void MemoryAccess::set_input_port_descriptor(const PortDescriptor& desc, const size_t i) { const auto it = m_input_ports.find(i); - NGRAPH_CHECK(it != m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); + OPENVINO_ASSERT(it != m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); (*it).second = { desc.count, desc.offset, i}; } void MemoryAccess::set_output_port_descriptor(const PortDescriptor& desc, const size_t i) { const auto it = m_output_ports.find(i); - NGRAPH_CHECK(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); + OPENVINO_ASSERT(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); (*it).second = { desc.count, desc.offset, i}; } const MemoryAccess::PortDescriptor& MemoryAccess::get_input_port_descriptor(const size_t i) const { const auto it = m_input_ports.find(i); - NGRAPH_CHECK(it != 
m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); + OPENVINO_ASSERT(it != m_input_ports.end(), "Index of input port descriptor should be less than count of input ports"); return (*it).second; } const MemoryAccess::PortDescriptor& MemoryAccess::get_output_port_descriptor(const size_t i) const { const auto it = m_output_ports.find(i); - NGRAPH_CHECK(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); + OPENVINO_ASSERT(it != m_output_ports.end(), "Index of output port descriptor should be less than count of output ports"); return (*it).second; } diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index 5de4dae47a95a4..dc13bb3e8bb716 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -160,7 +160,7 @@ Subgraph::Subgraph(const OutputVector& args, const std::shared_ptr& b for (size_t i = 0; i < body->get_output_size(); ++i) m_output_descriptions[0].push_back(std::make_shared(i, i)); m_transformations_allowed = false; - m_shape_infer = std::make_shared(body); + m_shape_infer = std::make_shared(body); } Subgraph::Subgraph(const NodeVector& args, const std::shared_ptr& body) @@ -292,7 +292,7 @@ auto Subgraph::wrap_node_as_subgraph(const std::shared_ptr& node) -> s } void Subgraph::fill_empty_output_names(const Output& target_output_node, const Output& replacement_output_node) { - NGRAPH_SUPPRESS_DEPRECATED_START + OPENVINO_SUPPRESS_DEPRECATED_START auto& out_tensor = target_output_node.get_tensor(); const std::string new_name = ov::op::util::get_ie_output_name(replacement_output_node); if (ov::descriptor::get_ov_tensor_legacy_name(out_tensor).empty()) { @@ -301,7 +301,7 @@ void Subgraph::fill_empty_output_names(const Output& target_output_node, c if (!replacement_output_node.get_names().empty()) { out_tensor.set_names(replacement_output_node.get_names()); } - 
NGRAPH_SUPPRESS_DEPRECATED_END + OPENVINO_SUPPRESS_DEPRECATED_END } auto Subgraph::constant_input_should_be_inside_body(const std::shared_ptr& node) -> bool { @@ -484,18 +484,18 @@ IShapeInferSnippets::Result Subgraph::shape_infer(const std::vectorinfer(input_shapes); } -Subgraph::NgraphShapeInfer::NgraphShapeInfer(const std::shared_ptr& body) : - m_ngraph_body(body) { - OPENVINO_ASSERT(m_ngraph_body, "Can't initialize shape infer with empty body"); +Subgraph::OVShapeInfer::OVShapeInfer(const std::shared_ptr& body) : + m_ov_body(body) { + OPENVINO_ASSERT(m_ov_body, "Can't initialize shape infer with empty body"); } -IShapeInferSnippets::Result Subgraph::NgraphShapeInfer::infer(const std::vector& input_shapes) { - const ParameterVector& parameters = m_ngraph_body->get_parameters(); - const ResultVector& results = m_ngraph_body->get_results(); +IShapeInferSnippets::Result Subgraph::OVShapeInfer::infer(const std::vector& input_shapes) { + const ParameterVector& parameters = m_ov_body->get_parameters(); + const ResultVector& results = m_ov_body->get_results(); OPENVINO_ASSERT(parameters.size() == input_shapes.size(), "Got invalid number of input shapes to reshape subgraph body"); for (size_t i = 0; i < parameters.size(); ++i) parameters[i]->set_partial_shape(utils::vdims_to_pshape(input_shapes[i].get())); - m_ngraph_body->validate_nodes_and_infer_types(); + m_ov_body->validate_nodes_and_infer_types(); std::vector outputDims; for (const auto& res : results) outputDims.emplace_back(utils::pshape_to_vdims(res->get_input_partial_shape(0))); @@ -702,7 +702,7 @@ snippets::Schedule Subgraph::generate(const std::vector diff --git a/src/common/snippets/src/pass/hash.cpp b/src/common/snippets/src/pass/hash.cpp index 48dd9586ae4337..2f975ef2cbccee 100644 --- a/src/common/snippets/src/pass/hash.cpp +++ b/src/common/snippets/src/pass/hash.cpp @@ -10,8 +10,6 @@ #include #include -#include "ngraph/ops.hpp" -#include "ngraph/opsets/opset.hpp" #include "openvino/core/except.hpp" 
#include "openvino/core/meta_data.hpp" #include "openvino/core/model.hpp" @@ -169,7 +167,7 @@ class SnippetsHasher : public ov::AttributeVisitor { m_node_type_name(node_type_name) {} void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override { - if (const auto& a = ov::as_type>>(&adapter)) { + if (const auto& a = ov::as_type>>(&adapter)) { m_hash = hash_combine(hash_combine(m_hash, name), a->get()->get_info().variable_id); } else if (const auto& a = ov::as_type>>(&adapter)) { diff --git a/src/common/snippets/src/pass/propagate_precision.cpp b/src/common/snippets/src/pass/propagate_precision.cpp index 6ba1f5f3d09ad1..568db74d6a5c0a 100644 --- a/src/common/snippets/src/pass/propagate_precision.cpp +++ b/src/common/snippets/src/pass/propagate_precision.cpp @@ -32,7 +32,7 @@ bool ov::snippets::pass::PropagatePrecision::run_on_model(const std::shared_ptr< auto type_info = op->get_type_info(); std::set supported_precisions; // TODO: At the moment Softmax is decomposed on Linear IR level. 
- // When Softmax will be decomposed on NGraph level, remove it + // When Softmax is decomposed on the OpenVINO level, remove it if (type_info.is_castable(ov::op::v1::Softmax::get_type_info_static())) { supported_precisions = {{ov::element::f32}}; } else { diff --git a/src/common/snippets/src/pass/softmax_reshape_elimination.cpp b/src/common/snippets/src/pass/softmax_reshape_elimination.cpp index 2f60f1e1155c76..36a0afb7c11325 100644 --- a/src/common/snippets/src/pass/softmax_reshape_elimination.cpp +++ b/src/common/snippets/src/pass/softmax_reshape_elimination.cpp @@ -10,7 +10,7 @@ #include "openvino/core/rt_info.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" -#include +#include "openvino/core/validation_util.hpp" ov::snippets::pass::SoftmaxReshapeElimination::SoftmaxReshapeElimination() { MATCHER_SCOPE(SoftmaxReshapeElimination); diff --git a/src/common/transformations/CMakeLists.txt b/src/common/transformations/CMakeLists.txt index 67907b0c265d5c..e7d365ca32492e 100644 --- a/src/common/transformations/CMakeLists.txt +++ b/src/common/transformations/CMakeLists.txt @@ -50,7 +50,7 @@ target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) # even the Transformations library is supposed to be Plugin API # we still have some code compiled as transformations, but headers are -# part of ngraph core API +# part of OpenVINO core API # so, we need to mark this library as important for ABI free ov_abi_free_target(${TARGET_NAME}_obj) diff --git a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp index 61f43c937be16e..7dcf6a9c44b3c3 100644 --- a/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp +++ b/src/common/transformations/tests/common_optimizations/fq_mul_fusion_test.cpp @@ -13,7 +13,6 @@ #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" #include
"functional_test_utils/plugin_cache.hpp" -#include "ie_core.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset4.hpp" #include "openvino/pass/manager.hpp" diff --git a/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp b/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp index 8e92a5e3f7797f..8127ad129ef34b 100644 --- a/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp +++ b/src/common/transformations/tests/common_optimizations/fq_reshape_fusion.cpp @@ -10,7 +10,6 @@ #include #include -#include "cnn_network_ngraph_impl.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset4.hpp" @@ -19,7 +18,6 @@ using namespace ov; using namespace testing; -using namespace InferenceEngine; namespace { @@ -32,8 +30,8 @@ struct FQReshapeFusionTestCase { bool is_negative; }; -class nGraphFQReshapeFusionTests : public ov::test::TestsCommon, - public testing::WithParamInterface> { +class FQReshapeFusionTests : public ov::test::TestsCommon, + public testing::WithParamInterface> { public: std::shared_ptr f, ref_f; @@ -115,7 +113,7 @@ class nGraphFQReshapeFusionTests : public ov::test::TestsCommon, } }; -TEST_P(nGraphFQReshapeFusionTests, ReshapeMatMul) { +TEST_P(FQReshapeFusionTests, ReshapeMatMul) { auto unh = std::make_shared(); pass::Manager manager; manager.register_pass(unh); @@ -134,7 +132,7 @@ TEST_P(nGraphFQReshapeFusionTests, ReshapeMatMul) { INSTANTIATE_TEST_SUITE_P( NGraph, - nGraphFQReshapeFusionTests, + FQReshapeFusionTests, testing::Values( // positive FQReshapeFusionTestCase{{1, 2, 1, 3}, diff --git a/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp b/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp index 61d236a6355628..4fa1af8088d6b2 100644 --- a/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp +++ 
b/src/common/transformations/tests/common_optimizations/mish_fusion_test.cpp @@ -19,7 +19,7 @@ using namespace ov; using namespace testing; -// LPT to nGraph migration: temporary disabling unexpected not reproduced fails on CI: +// LPT to openvino migration: temporary disabling unexpected not reproduced fails on CI: // https://openvino-ci.intel.com/job/private-ci/job/ie/job/build-linux-ubuntu18_i386/478/ TEST_F(TransformationTestsF, MishFusing) { { diff --git a/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp b/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp index 5b11259cbaf998..40f954312b7f76 100644 --- a/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_mimicking_sbs.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -20,15 +19,9 @@ TEST(SmartReshapeTests, MimickingSBS) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({12, 4})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 2, 3, 4})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, MimickingSBS_1) { @@ -40,15 +33,9 @@ TEST(SmartReshapeTests, MimickingSBS_1) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 24})); - 
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 2, 3, 4})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, MimickingSBS_2) { @@ -60,13 +47,7 @@ TEST(SmartReshapeTests, MimickingSBS_2) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(1)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({6, 4})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 2, 3, 4})); + EXPECT_ANY_THROW(set_batch(f, 1)); } diff --git a/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp b/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp index 5e8088a9f2371d..06408dc2807d36 100644 --- a/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_proposal_scales.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -39,12 +38,9 @@ TEST(SmartReshapeTests, Proposal1Scales) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Proposal1Scales_WithConvert) { @@ -75,12 +71,9 @@ TEST(SmartReshapeTests, Proposal1Scales_WithConvert) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); auto unh = std::make_shared(); init_unique_names(f, 
unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Proposal4Scales) { @@ -110,14 +103,9 @@ TEST(SmartReshapeTests, Proposal4Scales) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Proposal4Scales_WithConvert) { @@ -148,12 +136,7 @@ TEST(SmartReshapeTests, Proposal4Scales_WithConvert) { f = std::make_shared(NodeVector{proposal}, ParameterVector{input_0, input_1, input_2}); } - InferenceEngine::CNNNetwork network(f); - auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5})); + EXPECT_ANY_THROW(set_batch(f, 2)); } diff --git a/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp b/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp index bad3962e3fd080..d98cb32f258f4d 100644 --- a/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_reshape_1d.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -19,20 +18,16 @@ TEST(SmartReshapeTests, Reshape1d) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE( - 
network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({5})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({5})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {1, 3, 300, 300}}})); + ASSERT_NO_THROW(f->reshape({{1, 3, 300, 300}})); check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({270000})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({270000})); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300})); } TEST(SmartReshapeTests, Reshape1d_negative) { @@ -44,19 +39,10 @@ TEST(SmartReshapeTests, Reshape1d_negative) { f = std::make_shared(NodeVector{reshape}, ParameterVector{input, pattern}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE( - network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().is_dynamic()); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().is_dynamic()); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {1, 3, 300, 300}}})); - check_unique_names(f, unh); - - 
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({270000})); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300})); - ASSERT_FALSE(network.getFunction()->get_parameters()[1]->get_output_target_inputs(0).empty()); + EXPECT_ANY_THROW(f->reshape({{1, 3, 300, 300}})); } diff --git a/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp b/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp index 002dc860dfffba..3c9053594ff68c 100644 --- a/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_strided_slice_squeeze.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -27,19 +26,13 @@ TEST(SmartReshapeTests, SS_Squeeze) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, 
SS_Squeeze_partial_begin_end_mask) { @@ -59,21 +52,19 @@ TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end_mask) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 128, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 128, 768})); auto unh = std::make_shared(); init_unique_names(f, unh); - auto inputname = network.getFunction()->get_parameters()[0]->get_friendly_name(); - ASSERT_NO_THROW(network.reshape(InferenceEngine::ICNNNetwork::InputShapes{{inputname, {2, 128, 768}}})); + auto inputname = f->get_parameters()[0]->get_friendly_name(); + ASSERT_NO_THROW(f->reshape({{2, 128, 768}})); check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 128, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({2, 128, 768})); } TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end) { @@ -95,21 +86,19 @@ TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) - << 
network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 1, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 1, 768})); auto unh = std::make_shared(); init_unique_names(f, unh); - auto inputname = network.getFunction()->get_parameters()[0]->get_friendly_name(); - ASSERT_NO_THROW(network.reshape(InferenceEngine::ICNNNetwork::InputShapes{{inputname, {2, 1, 768}}})); + auto inputname = f->get_parameters()[0]->get_friendly_name(); + ASSERT_NO_THROW(f->reshape({{2, 1, 768}})); check_unique_names(f, unh); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 1, 768})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({2, 1, 768})); } TEST(SmartReshapeTests, SS_Squeeze_mask_use_negative) { @@ -128,15 +117,13 @@ TEST(SmartReshapeTests, SS_Squeeze_mask_use_negative) { f = std::make_shared(NodeVector{squeeze}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({1, 3})) + << f->get_results()[0]->get_output_partial_shape(0); + 
ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_ANY_THROW(network.setBatchSize(2)); + ASSERT_ANY_THROW(set_batch(f, 2)); check_unique_names(f, unh); } @@ -156,15 +143,13 @@ TEST(SmartReshapeTests, SS_Squeeze_negative_stride_negative) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_ANY_THROW(network.setBatchSize(2)); + ASSERT_ANY_THROW(set_batch(f, 2)); check_unique_names(f, unh); } @@ -185,20 +170,13 @@ TEST(SmartReshapeTests, SS_SharedSqueezes) { f = std::make_shared(NodeVector{relu_1, relu_2}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - 
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, SS_SqueezeNegativeAxes) { @@ -218,20 +196,13 @@ TEST(SmartReshapeTests, SS_SqueezeNegativeAxes) { f = std::make_shared(NodeVector{relu}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3, 1, 8, 1, 2})); + EXPECT_ANY_THROW(set_batch(f, 2)); } TEST(SmartReshapeTests, Squeeze_SSNegativeAxes) { @@ -250,18 +221,11 @@ TEST(SmartReshapeTests, Squeeze_SSNegativeAxes) { f = std::make_shared(NodeVector{ss}, ParameterVector{input}); } - InferenceEngine::CNNNetwork network(f); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - 
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) + << f->get_results()[0]->get_output_partial_shape(0); + ASSERT_TRUE(f->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.setBatchSize(2)); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) - << network.getFunction()->get_results()[0]->get_output_partial_shape(0); - ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3, 1, 8, 1, 2})); + EXPECT_ANY_THROW(set_batch(f, 2)); } diff --git a/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp b/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp index 9ff6aa84ca7419..25c30db2fa4339 100644 --- a/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp +++ b/src/common/transformations/tests/smart_reshape/sr_sub_graph_ops.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -49,26 +48,17 @@ TEST(SmartReshapeTests, TensorIteratorStaticParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); // concat output (seq len = 1, so it means num_iter = 1) - 
ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, TensorIteratorDynamicParameters) { @@ -109,26 +99,17 @@ TEST(SmartReshapeTests, TensorIteratorDynamicParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + 
ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); // concat output (seq len = 1, so it means num_iter = 1) - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, LoopStaticParameters) { @@ -174,29 +155,17 @@ TEST(SmartReshapeTests, LoopStaticParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE( - 
network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); // concat output - ASSERT_TRUE( - network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE( - network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, LoopDynamicParameters) { @@ -242,29 +211,17 @@ TEST(SmartReshapeTests, LoopDynamicParameters) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - 
InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE( - network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); // concat output - ASSERT_TRUE( - network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE( - network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {32, 10, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } TEST(SmartReshapeTests, LoopParentParametersUsedInBody) { @@ 
-314,29 +271,17 @@ TEST(SmartReshapeTests, LoopParentParametersUsedInBody) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE( - network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); // concat output - ASSERT_TRUE( - network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); - ASSERT_TRUE( - network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible(PartialShape::dynamic())); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {4, 3, 2}}, - {f->get_parameters()[1]->get_friendly_name(), {4, 3, 2}}, - {f->get_parameters()[2]->get_friendly_name(), {4, 3, 2}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({4, 3, 2})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({4, 30, 2})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({4, 3, 2})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {4, 3, 2}}, + 
{f->get_parameters()[1]->get_friendly_name(), {4, 3, 2}}, + {f->get_parameters()[2]->get_friendly_name(), {4, 3, 2}}})); } TEST(SmartReshapeTests, TensorIteratorParentParameterUsedInBody) { @@ -381,24 +326,15 @@ TEST(SmartReshapeTests, TensorIteratorParentParameterUsedInBody) { f = std::make_shared(OutputVector{out0, out1, out2, out3}, ParameterVector{X, Y, M}); } - InferenceEngine::CNNNetwork network(f); - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[0]->get_output_partial_shape(0).compatible({})); + ASSERT_TRUE(f->get_results()[1]->get_output_partial_shape(0).compatible({1, 1, 1})); // concat output (seq len = 1, so it means num_iter = 1) - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); - ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[2]->get_output_partial_shape(0).compatible({1, 1, 1})); + ASSERT_TRUE(f->get_results()[3]->get_output_partial_shape(0).compatible({1, 1, 1})); auto unh = std::make_shared(); init_unique_names(f, unh); - ASSERT_NO_THROW(network.reshape( - InferenceEngine::ICNNNetwork::InputShapes{{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, - {f->get_parameters()[1]->get_friendly_name(), {1, 1, 1}}, - {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); - check_unique_names(f, unh); - - ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({})); - ASSERT_TRUE(network.getFunction()->get_results()[1]->get_output_partial_shape(0).compatible({32, 1, 10})); - // concat output - ASSERT_TRUE(network.getFunction()->get_results()[2]->get_output_partial_shape(0).compatible({32, 10, 10})); - 
ASSERT_TRUE(network.getFunction()->get_results()[3]->get_output_partial_shape(0).compatible({32, 1, 1})); + EXPECT_ANY_THROW(f->reshape({{f->get_parameters()[0]->get_friendly_name(), {32, 1, 10}}, + {f->get_parameters()[1]->get_friendly_name(), {1, 1, 1}}, + {f->get_parameters()[2]->get_friendly_name(), {32, 1, 10}}})); } diff --git a/src/common/transformations/tests/utils/primitives_priority_test.cpp b/src/common/transformations/tests/utils/primitives_priority_test.cpp index a748477e2b9137..64f6330a1da188 100644 --- a/src/common/transformations/tests/utils/primitives_priority_test.cpp +++ b/src/common/transformations/tests/utils/primitives_priority_test.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include @@ -22,8 +21,6 @@ using namespace ov; using namespace testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; TEST(TransformationTests, ConvBiasFusion) { std::shared_ptr f(nullptr); @@ -46,12 +43,7 @@ TEST(TransformationTests, ConvBiasFusion) { std::unordered_map pp; - InferenceEngine::CNNNetwork network(f); - - // Set PrimitivesPriority to all Convolutions - auto model = network.getFunction(); - ASSERT_NE(nullptr, model); - for (auto& op : model->get_ops()) { + for (auto& op : f->get_ops()) { if (auto conv = std::dynamic_pointer_cast(op)) { auto& rtInfo = conv->get_rt_info(); rtInfo[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("test"); @@ -59,8 +51,7 @@ TEST(TransformationTests, ConvBiasFusion) { } } - auto clonedNetwork = InferenceEngine::details::cloneNetwork(network); - auto funcs = clonedNetwork.getFunction(); + auto funcs = f->clone(); for (auto& op : funcs->get_ops()) { if (auto conv = std::dynamic_pointer_cast(op)) {