Skip to content

Commit

Permalink
[ONNX] Align behavior of ReduceL2-11, 13, 18 with original framework (#…
Browse files Browse the repository at this point in the history
…22741)

### Details:
- I've aligned the ReduceL2 operation with opsets 11, 13, and 18. I have
some doubts about how to implement support for the bfloat16 tensor type in
opset 13, and also some doubts about opset 18. I've registered the
function inside ops_bridge.cpp, created test models, and added them
to onnx_import.in.cpp.
### Tickets:
 - Closes #20560

---------

Co-authored-by: Przemyslaw Wysocki <przemyslaw.wysocki@intel.com>
Co-authored-by: Georgy Krivoruchko <georgy.krivoruchko@intel.com>
  • Loading branch information
3 people authored Apr 24, 2024
1 parent 8ec1fdb commit 69ffb51
Show file tree
Hide file tree
Showing 9 changed files with 223 additions and 9 deletions.
7 changes: 7 additions & 0 deletions src/frontends/onnx/frontend/src/op/reduce.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,7 @@ ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node) {
return {make_ov_reduction_op<v1::ReduceSum>(node, square_node, supported_types_v1)};
}
} // namespace set_1

/*
Opset 11 is skipped because there is no significant difference between opset 1 and opset 11.
Found difference is:
Expand All @@ -198,6 +199,9 @@ namespace set_13 {
// ReduceSum, ONNX opset 13, lowered onto ov v1::ReduceSum with the opset-13
// supported type set.
ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node) {
    const auto data = node.get_ov_inputs().at(0);
    return {make_ov_reduction_op<v1::ReduceSum>(node, data, supported_types_v2, false)};
}
/// \brief ReduceL2, ONNX opset 13 -> ov v4::ReduceL2 (L2 norm over the selected axes).
/// \param node  The ONNX node being imported; its first input is the data tensor.
/// \return A single-output vector holding the reduction result.
/// Parameter type spelled fully qualified (was bare `Node`) for consistency with
/// the sibling reduce_* entry points in this namespace.
ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node) {
    return {make_ov_reduction_op<v4::ReduceL2>(node, node.get_ov_inputs().at(0), supported_types_v2)};
}
// ReduceMax, ONNX opset 13, lowered onto ov v1::ReduceMax with the opset-13
// supported type set for max/min reductions.
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) {
    const auto data = node.get_ov_inputs().at(0);
    return {make_ov_reduction_op<v1::ReduceMax>(node, data, supported_types_v3)};
}
Expand All @@ -208,6 +212,9 @@ ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node) {
} // namespace set_13

namespace set_18 {
/// \brief ReduceL2, ONNX opset 18 -> ov v4::ReduceL2.
/// \param node  The ONNX node being imported; its first input is the data tensor.
/// \return A single-output vector holding the reduction result.
/// The trailing `false` mirrors the other opset-18 overloads in this namespace
/// (reduce_max, reduce_min) — presumably it selects the opset-18 axes-as-input
/// handling in make_ov_reduction_op; confirm against the helper's declaration.
/// Parameter type spelled fully qualified (was bare `Node`) for consistency with
/// the sibling reduce_* entry points.
ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node) {
    return {make_ov_reduction_op<v4::ReduceL2>(node, node.get_ov_inputs().at(0), supported_types_v2, false)};
}
// ReduceMax, ONNX opset 18, lowered onto ov v1::ReduceMax; same shape as the
// opset-13 overload but with the extra `false` flag the opset-18 set uses.
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) {
    const auto data = node.get_ov_inputs().at(0);
    return {make_ov_reduction_op<v1::ReduceMax>(node, data, supported_types_v3, false)};
}
Expand Down
7 changes: 7 additions & 0 deletions src/frontends/onnx/frontend/src/op/reduce.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,12 @@ ov::OutputVector reduce_l1(const ov::frontend::onnx::Node& node);
namespace set_1 {
ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node);
} // namespace set_1
// ReduceL2 importer, ONNX opset 13 (implementation in reduce.cpp).
namespace set_13 {
ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node);
} // namespace set_13
// ReduceL2 importer, ONNX opset 18. NOTE(review): opset 18 moves `axes` from an
// attribute to a dynamic input per the ONNX spec; see the opset-18 overload in
// reduce.cpp for how that is handled.
namespace set_18 {
ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node);
} // namespace set_18

namespace set_1 {
ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node);
Expand Down Expand Up @@ -73,6 +79,7 @@ ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node);
namespace set_1 {
ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node);
} // namespace set_1

} // namespace op
} // namespace onnx
} // namespace frontend
Expand Down
2 changes: 2 additions & 0 deletions src/frontends/onnx/frontend/src/ops_bridge.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -487,6 +487,8 @@ OperatorsBridge::OperatorsBridge() {
REGISTER_OPERATOR("ReduceLogSumExp", 1, reduce_log_sum_exp);
REGISTER_OPERATOR("ReduceL1", 1, reduce_l1);
REGISTER_OPERATOR("ReduceL2", 1, reduce_l2);
REGISTER_OPERATOR("ReduceL2", 13, reduce_l2);
REGISTER_OPERATOR("ReduceL2", 18, reduce_l2);
REGISTER_OPERATOR("ReduceMax", 1, reduce_max);
REGISTER_OPERATOR("ReduceMax", 13, reduce_max);
REGISTER_OPERATOR("ReduceMax", 18, reduce_max);
Expand Down
4 changes: 2 additions & 2 deletions src/frontends/onnx/tests/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,8 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
xfail_issue_99957 = xfail_test(reason="LayerNorm - RuntimeError: While validating node '<Node(Reshape): Mean>'")
xfail_issue_99960 = xfail_test(reason="MVN - Results mismatch")
xfail_issue_99961 = xfail_test(reason="Optional has/get element operators are not supported)'")
xfail_issue_99962 = pytest.mark.skip(reason="ReduceL1/L2 - Unrecognized attribute: axes for operator ReduceL1/L2")
xfail_issue_99968 = xfail_test(reason="ReduceL1/L2 - Results mismatch or unsupported ReduceSum with "
xfail_issue_99962 = pytest.mark.skip(reason="ReduceL1 - Unrecognized attribute: axes for operator ReduceL1")
xfail_issue_99968 = xfail_test(reason="ReduceL1 - Results mismatch or unsupported ReduceSum with "
"dynamic rank by CPU plugin")
xfail_issue_99969 = xfail_test(reason="Resize - Results mismatch / "
"RuntimeError: While validating ONNX node '<Node(Resize): Y>' / "
Expand Down
48 changes: 48 additions & 0 deletions src/frontends/onnx/tests/models/reduce_l2_11.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Test model: ReduceL2, ONNX opset 11.
# Input "A" is a float32 tensor of shape (1, 1, 4, 4). The node sets neither
# `axes` nor `keepdims`, so the ONNX defaults apply — presumably reduce over
# all axes; see the matching expectation in onnx_import.in.cpp.
ir_version: 4
producer_name: "nGraph ONNX Importer"
graph {
  node {
    input: "A"
    output: "B"
    op_type: "ReduceL2"
  }
  name: "compute_graph"
  input {
    name: "A"
    type {
      tensor_type {
        # elem_type 1 = FLOAT (float32)
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
          dim {
            dim_value: 1
          }
          dim {
            dim_value: 4
          }
          dim {
            dim_value: 4
          }
        }
      }
    }
  }
  output {
    name: "B"
    type {
      tensor_type {
        elem_type: 1
        # NOTE(review): declared output rank is 1, while the keepdims=1 default
        # would yield rank 4 — the test may ignore the declared shape; confirm.
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
}
opset_import {
  version: 11
}
48 changes: 48 additions & 0 deletions src/frontends/onnx/tests/models/reduce_l2_13.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Test model: ReduceL2, ONNX opset 13.
# Input "A" is a float32 tensor of shape (1, 1, 4, 4). The node sets neither
# `axes` nor `keepdims`, so the ONNX defaults apply — presumably reduce over
# all axes; see the matching expectation in onnx_import.in.cpp.
ir_version: 4
producer_name: "nGraph ONNX Importer"
graph {
  node {
    input: "A"
    output: "B"
    op_type: "ReduceL2"
  }
  name: "compute_graph"
  input {
    name: "A"
    type {
      tensor_type {
        # elem_type 1 = FLOAT (float32)
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
          dim {
            dim_value: 1
          }
          dim {
            dim_value: 4
          }
          dim {
            dim_value: 4
          }
        }
      }
    }
  }
  output {
    name: "B"
    type {
      tensor_type {
        elem_type: 1
        # NOTE(review): declared output rank is 1, while the keepdims=1 default
        # would yield rank 4 — the test may ignore the declared shape; confirm.
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
}
opset_import {
  version: 13
}
61 changes: 61 additions & 0 deletions src/frontends/onnx/tests/models/reduce_l2_18.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Test model: ReduceL2, ONNX opset 18.
# Input "A" is a float32 tensor of shape (1, 1, 4, 4).
# NOTE(review): the graph declares a second input "axes", but the ReduceL2 node
# does not list it among its inputs, so the reduction presumably runs over all
# axes (matching the expectation in onnx_import.in.cpp). Also, elem_type 6 is
# INT32 while the ONNX spec requires axes to be tensor(int64) (elem_type 7) —
# confirm whether both are intentional.
ir_version: 4
producer_name: "nGraph ONNX Importer"
graph {
  node {
    input: "A"
    output: "B"
    op_type: "ReduceL2"
  }
  name: "compute_graph"
  input {
    name: "A"
    type {
      tensor_type {
        # elem_type 1 = FLOAT (float32)
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
          dim {
            dim_value: 1
          }
          dim {
            dim_value: 4
          }
          dim {
            dim_value: 4
          }
        }
      }
    }
  }
  # Declared but unconsumed by the node — see NOTE(review) above.
  input {
    name: "axes"
    type {
      tensor_type {
        # elem_type 6 = INT32
        elem_type: 6
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  output {
    name: "B"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
}
opset_import {
  version: 18
}
48 changes: 48 additions & 0 deletions src/frontends/onnx/tests/onnx_import.in.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1076,6 +1076,54 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2) {
test_case.run();
}

// Exercises the ONNX ReduceL2 importer against an opset-11 model
// (reduce_l2_11.onnx). All 16 input elements are 3 and the model sets no axes,
// so the reduction covers the whole tensor: sqrt(16 * 3^2) = sqrt(144) = 12.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2_11) {
    auto model = convert_model("reduce_l2_11.onnx");

    // input data shape (1, 1, 4, 4)
    Inputs inputs{
        ov::test::NDArray<float, 4>({{{{3, 3, 3, 3}, {3, 3, 3, 3}, {3, 3, 3, 3}, {3, 3, 3, 3}}}}).get_vector()};

    // output data shape (1,) — written as a rank-4 NDArray literal, but
    // get_vector() flattens it to the single expected value.
    auto expected_output = ov::test::NDArray<float, 4>({{{{12}}}}).get_vector();

    auto test_case = ov::test::TestCase(model, s_device);
    test_case.add_multiple_inputs(inputs);
    test_case.add_expected_output(expected_output);
    test_case.run();
}

// Exercises the ONNX ReduceL2 importer against an opset-13 model
// (reduce_l2_13.onnx). All 16 input elements are 4 and the model sets no axes,
// so the reduction covers the whole tensor: sqrt(16 * 4^2) = sqrt(256) = 16.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2_13) {
    auto model = convert_model("reduce_l2_13.onnx");

    // input data shape (1, 1, 4, 4)
    Inputs inputs{
        ov::test::NDArray<float, 4>({{{{4, 4, 4, 4}, {4, 4, 4, 4}, {4, 4, 4, 4}, {4, 4, 4, 4}}}}).get_vector()};

    // output data shape (1,) — written as a rank-4 NDArray literal, but
    // get_vector() flattens it to the single expected value.
    auto expected_output = ov::test::NDArray<float, 4>({{{{16}}}}).get_vector();

    auto test_case = ov::test::TestCase(model, s_device);
    test_case.add_multiple_inputs(inputs);
    test_case.add_expected_output(expected_output);
    test_case.run();
}

// Exercises the ONNX ReduceL2 importer against an opset-18 model
// (reduce_l2_18.onnx). All 16 input elements are 5; the model declares an
// `axes` graph input the node never consumes, so the reduction presumably
// covers the whole tensor: sqrt(16 * 5^2) = sqrt(400) = 20.
// NOTE(review): only the data tensor is fed here — no value is supplied for
// the model's `axes` input; confirm the harness tolerates the unused input.
OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_l2_18) {
    auto model = convert_model("reduce_l2_18.onnx");

    // input data shape (1, 1, 4, 4)
    Inputs inputs{
        ov::test::NDArray<float, 4>({{{{5, 5, 5, 5}, {5, 5, 5, 5}, {5, 5, 5, 5}, {5, 5, 5, 5}}}}).get_vector()};

    // output data shape (1,) — written as a rank-4 NDArray literal, but
    // get_vector() flattens it to the single expected value.
    auto expected_output = ov::test::NDArray<float, 4>({{{{20}}}}).get_vector();

    auto test_case = ov::test::TestCase(model, s_device);
    test_case.add_multiple_inputs(inputs);
    test_case.add_expected_output(expected_output);
    test_case.run();
}

OPENVINO_TEST(${BACKEND_NAME}, onnx_model_reduce_max) {
auto model = convert_model("reduce_max.onnx");

Expand Down
7 changes: 0 additions & 7 deletions src/frontends/onnx/tests/tests_python/test_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -465,12 +465,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None
"OnnxBackendNodeModelTest.test_reduce_l1_keep_dims_random_cpu",
"OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_example_cpu",
"OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_random_cpu",
"OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_example_cpu",
"OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_random_cpu",
"OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_example_cpu",
"OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_random_cpu",
"OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_example_cpu",
"OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_random_cpu",
"OnnxBackendNodeModelTest.test_reduce_log_sum_asc_axes_cpu",
"OnnxBackendNodeModelTest.test_reduce_log_sum_asc_axes_expanded_cpu",
"OnnxBackendNodeModelTest.test_reduce_log_sum_desc_axes_cpu",
Expand Down Expand Up @@ -689,7 +683,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None
(
xfail_issue_125493,
"OnnxBackendNodeModelTest.test_reduce_l1_empty_set_cpu",
"OnnxBackendNodeModelTest.test_reduce_l2_empty_set_cpu",
"OnnxBackendNodeModelTest.test_reduce_log_sum_exp_empty_set_cpu",
"OnnxBackendNodeModelTest.test_reduce_prod_empty_set_cpu",
"OnnxBackendNodeModelTest.test_reduce_sum_square_empty_set_cpu",
Expand Down

0 comments on commit 69ffb51

Please sign in to comment.