From 3626da4689f647008ed9259cc2e5a71743fc06d7 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Thu, 10 Aug 2023 07:18:36 +0530 Subject: [PATCH 01/21] ONNX BlackManWindow enabled --- .../onnx/frontend/src/op/blackmanwindow.cpp | 94 +++++++++++++++++++ .../onnx/frontend/src/op/blackmanwindow.hpp | 16 ++++ .../onnx/frontend/src/ops_bridge.cpp | 2 + 3 files changed, 112 insertions(+) create mode 100644 src/frontends/onnx/frontend/src/op/blackmanwindow.cpp create mode 100644 src/frontends/onnx/frontend/src/op/blackmanwindow.hpp diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp new file mode 100644 index 00000000000000..0738ff585a4721 --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -0,0 +1,94 @@ +#include "op/blackmanwindow.hpp" + +#include + +#include "utils/common.hpp" +#include "default_opset.hpp" + +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { +OutputVector blackmanwindow(const Node& node) { + const auto size = node.get_ng_inputs().at(0); + const auto output_datatype = node.get_attribute_value("output_datatype", 1); + const bool periodic = node.get_attribute_value("periodic", 1); + + const int64_t a_0 = 0.42, a_1 = -0.5, a_2 = 0.08; + const ov::PartialShape shape = size.get_partial_shape(); + const std::vector axis_lengths = shape.to_shape(); + + element::Type tensor_type; + switch (output_datatype) { + case 1: + tensor_type = element::f32; + break; + case 2: + tensor_type = element::u8; + break; + case 3: + tensor_type = element::i8; + break; + case 4: + tensor_type = element::u16; + break; + case 5: + tensor_type = element::i16; + break; + case 6: + tensor_type = element::i32; + break; + case 7: + tensor_type = element::i64; + break; + case 10: + tensor_type = element::f16; + break; + case 11: + tensor_type = element::f64; + break; + case 12: + tensor_type = element::u32; + break; + case 13: + tensor_type = element::u64; + break; + case 16: + tensor_type = element::bf16; + break; + default: + throw std::runtime_error("Unsupported output data type."); + } + + if (periodic) { + const auto range = std::make_shared(tensor_type, size, 0, 1); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor_1 = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), size)); + const auto factor_2 = std::make_shared(range, std::make_shared(std::make_shared(pi, 4), size)); + const auto cos_1 = std::make_shared(factor_1); + const auto cos_2 = std::make_shared(factor_2); + const auto scaled_cos_1 = std::make_shared(cos_1, a_1); + const auto scaled_cos_2 = std::make_shared(cos_2, a_2); + const auto y_values = std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; + } else { + const auto range = std::make_shared(tensor_type, size, 0, 1); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor_1 = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), std::make_shared(size, 1))); + const auto factor_2 = std::make_shared(range, std::make_shared(std::make_shared(pi, 4), std::make_shared(size, 1))); + const auto cos_1 = std::make_shared(factor_1); + const auto cos_2 = std::make_shared(factor_2); + const auto scaled_cos_1 = std::make_shared(cos_1, a_1); + const auto scaled_cos_2 = std::make_shared(cos_2, a_2); + const 
auto y_values = std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; + } +} +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp new file mode 100644 index 00000000000000..a4dae856d607d2 --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp @@ -0,0 +1,16 @@ +#pragma once + +#include "ngraph/node.hpp" +#include "onnx_import/core/node.hpp" + +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { + +OutputVector blackmanwindow(const Node& node); + +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index b5da27ce11814c..bc84c317c97003 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -29,6 +29,7 @@ #include "op/average_pool.hpp" #include "op/batch_norm.hpp" #include "op/bitshift.hpp" +#include "op/blackmanwindow.hpp" #include "op/cast.hpp" #include "op/cast_like.hpp" #include "op/ceil.hpp" @@ -343,6 +344,7 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("BatchNormalization", 1, batch_norm); REGISTER_OPERATOR("BatchNormalization", 7, batch_norm); REGISTER_OPERATOR("BitShift", 1, bitshift); + REGISTER_OPERATOR("BlackManWindow", 1, blackmanwindow); REGISTER_OPERATOR("Cast", 1, cast); REGISTER_OPERATOR("CastLike", 1, cast_like); REGISTER_OPERATOR("Ceil", 1, ceil); From 799a68eaaa6c31ff908cb984f0130ffb05d85590 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Thu, 10 Aug 2023 17:06:26 +0530 Subject: [PATCH 02/21] added a test periodic --- .../models/blackmanwindow_periodic.prototxt | 55 +++++++++++++++++++ .../models/blackmanwindow_symmetric.prototxt | 55 +++++++++++++++++++ src/frontends/onnx/tests/onnx_import.in.cpp | 14 +++++ 3 files changed, 124 insertions(+) create mode 100644 src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt create mode 100644 src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt diff --git a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt new file mode 100644 index 00000000000000..8f24affd4f403a --- /dev/null +++ b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt @@ -0,0 +1,55 @@ +ir_version: 7 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "size" + output: "y" + op_type: "BlackmanWindow" + attribute { + name: "output_datatype" + i: 1 # Use 1 for f32 + type: INT + } + attribute { + name: "periodic" + i: 1 # Set to 1 for periodic, 0 for non-periodic + type: INT + } + } + name: "test_blackmanwindow_periodic" + initializer { + dims: 1 + data_type: 7 # INT64 + int64_data: 10 + name: "size" + } + input { + name: "size" + type { + tensor_type { + elem_type: 7 # INT64 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 # FLOAT + shape { + dim { + dim_value: 10 # Modify this based on your expected output shape + } + } + } + } + } +} +opset_import { + version: 12 +} diff --git a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt 
b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt new file mode 100644 index 00000000000000..a46187a448923a --- /dev/null +++ b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt @@ -0,0 +1,55 @@ +ir_version: 7 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "size" + output: "y" + op_type: "BlackmanWindow" + attribute { + name: "output_datatype" + i: 1 # Use 1 for f32 + type: INT + } + attribute { + name: "symmetric" + i: 0 # Set to 1 for periodic, 0 for non-periodic + type: INT + } + } + name: "test_blackmanwindow_symmetric" + initializer { + dims: 1 + data_type: 7 # INT64 + int64_data: 10 + name: "size" + } + input { + name: "size" + type { + tensor_type { + elem_type: 7 # INT64 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 # FLOAT + shape { + dim { + dim_value: 10 # Modify this based on your expected output shape + } + } + } + } + } +} +opset_import { + version: 12 +} diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index db59268baa98a3..840ed646dd4208 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6701,3 +6701,17 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unique_3d_with_duplicates_and_axis_2) test_case.run(); } + +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow) { + auto function = + onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/blackmanwindow.onnx")); + + auto test_case = ov::test::TestCase(function, s_device); + + test_case.add_input({10}); + test_case.add_expected_output(Shape{10}, {0.0000f, 0.0509f, 0.2580f, 0.6300f, 0.9511f, 0.9511f, 0.6300f, 0.2580f, 0.0509f, 0.0000f}); + + test_case.run(); +} From 48ddce820c2784eb64f4b69a1d29b026fb990a64 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Thu, 10 Aug 2023 19:57:18 +0530 Subject: [PATCH 03/21] Add the license statement --- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 3 +++ src/frontends/onnx/frontend/src/op/blackmanwindow.hpp | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 0738ff585a4721..2a47892f66ae21 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -1,3 +1,6 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + #include "op/blackmanwindow.hpp" #include diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp index a4dae856d607d2..553cc8b838630a 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp @@ -1,3 +1,6 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + #pragma once #include "ngraph/node.hpp" From eb607aec28fd8113553b0c424d91ce510955b69f Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Sat, 26 Aug 2023 00:20:54 +0530 Subject: [PATCH 04/21] ONNX HammingWindow, HannWindow enabled also added basic tests for each --- .../onnx/frontend/src/op/hammingwindow.cpp | 93 +++++++++++++++++++ .../onnx/frontend/src/op/hammingwindow.hpp | 19 ++++ .../onnx/frontend/src/op/hannwindow.cpp | 93 +++++++++++++++++++ .../onnx/frontend/src/op/hannwindow.hpp | 19 ++++ 
.../onnx/frontend/src/ops_bridge.cpp | 4 + .../models/hammingwindow_periodic.prototxt | 55 +++++++++++ .../models/hammingwindow_symmetric.prototxt | 55 +++++++++++ .../tests/models/hannwindow_periodic.prototxt | 55 +++++++++++ .../models/hannwindow_symmetric.prototxt | 55 +++++++++++ src/frontends/onnx/tests/onnx_import.in.cpp | 44 ++++++++- 10 files changed, 491 insertions(+), 1 deletion(-) create mode 100644 src/frontends/onnx/frontend/src/op/hammingwindow.cpp create mode 100644 src/frontends/onnx/frontend/src/op/hammingwindow.hpp create mode 100644 src/frontends/onnx/frontend/src/op/hannwindow.cpp create mode 100644 src/frontends/onnx/frontend/src/op/hannwindow.hpp create mode 100644 src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt create mode 100644 src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt create mode 100644 src/frontends/onnx/tests/models/hannwindow_periodic.prototxt create mode 100644 src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp new file mode 100644 index 00000000000000..d3fc34b2463a32 --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -0,0 +1,93 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "op/hammingwindow.hpp" + +#include + +#include "utils/common.hpp" +#include "default_opset.hpp" + +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { +OutputVector hammingwindow(const Node& node) { + const auto size = node.get_ng_inputs().at(0); + const auto output_datatype = node.get_attribute_value("output_datatype", 1); + const bool periodic = node.get_attribute_value("periodic", 1); + + const float a_0 = 25.0 / 46.0; + const float a_1 = 1.0 - a_0; + + const ov::PartialShape shape = size.get_partial_shape(); + const std::vector axis_lengths = shape.to_shape(); + + element::Type tensor_type; + switch (output_datatype) { + case 1: + tensor_type = element::f32; + break; + case 2: + tensor_type = element::u8; + break; + case 3: + tensor_type = element::i8; + break; + case 4: + tensor_type = element::u16; + break; + case 5: + tensor_type = element::i16; + break; + case 6: + tensor_type = element::i32; + break; + case 7: + tensor_type = element::i64; + break; + case 10: + tensor_type = element::f16; + break; + case 11: + tensor_type = element::f64; + break; + case 12: + tensor_type = element::u32; + break; + case 13: + tensor_type = element::u64; + break; + case 16: + tensor_type = element::bf16; + break; + default: + throw std::runtime_error("Unsupported output data type."); + } + + if (periodic) { + const auto range = std::make_shared(tensor_type, size, 0, 1); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), size)); + const auto cos = std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; + } else { + const auto range = std::make_shared(tensor_type, size, 0, 1); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), std::make_shared(size, 1))); + const auto cos = 
std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; + } +} +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp new file mode 100644 index 00000000000000..106587949a5f95 --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "ngraph/node.hpp" +#include "onnx_import/core/node.hpp" + +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { + +OutputVector hammingwindow(const Node& node); + +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp new file mode 100644 index 00000000000000..b463fe214337da --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -0,0 +1,93 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "op/hannwindow.hpp" + +#include + +#include "utils/common.hpp" +#include "default_opset.hpp" + +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { +OutputVector hannwindow(const Node& node) { + const auto size = node.get_ng_inputs().at(0); + const auto output_datatype = node.get_attribute_value("output_datatype", 1); + const bool periodic = node.get_attribute_value("periodic", 1); + + const float a_0 = 0.5; + const float a_1 = 0.5; + + const ov::PartialShape shape = size.get_partial_shape(); + const std::vector axis_lengths = shape.to_shape(); + + element::Type tensor_type; + switch (output_datatype) { + case 1: + tensor_type = element::f32; + break; + case 2: + tensor_type = element::u8; + break; + case 3: + tensor_type = element::i8; + break; + case 4: + tensor_type = element::u16; + break; + case 5: + tensor_type = element::i16; + break; + case 6: + tensor_type = element::i32; + break; + case 7: + tensor_type = element::i64; + break; + case 10: + tensor_type = element::f16; + break; + case 11: + tensor_type = element::f64; + break; + case 12: + tensor_type = element::u32; + break; + case 13: + tensor_type = element::u64; + break; + case 16: + tensor_type = element::bf16; + break; + default: + throw std::runtime_error("Unsupported output data type."); + } + + if (periodic) { + const auto range = std::make_shared(tensor_type, size, 0, 1); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), size)); + const auto cos = std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; + } else { + const auto range = std::make_shared(tensor_type, size, 0, 1); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), 
std::make_shared(size, 1))); + const auto cos = std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; + } +} +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp new file mode 100644 index 00000000000000..f36b8be505d611 --- /dev/null +++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "ngraph/node.hpp" +#include "onnx_import/core/node.hpp" + +namespace ngraph { +namespace onnx_import { +namespace op { +namespace set_1 { + +OutputVector hannwindow(const Node& node); + +} // namespace set_1 +} // namespace op +} // namespace onnx_import +} // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index bc84c317c97003..498e8b14ca8610 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -76,6 +76,8 @@ #include "op/greater.hpp" #include "op/grid_sample.hpp" #include "op/gru.hpp" +#include "op/hammingwindow.hpp" +#include "op/hannwindow.hpp" #include "op/hard_sigmoid.hpp" #include "op/hard_swish.hpp" #include "op/hardmax.hpp" @@ -392,6 +394,8 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("Greater", 1, greater); REGISTER_OPERATOR("GridSample", 1, grid_sample); REGISTER_OPERATOR("GRU", 1, gru); + REGISTER_OPERATOR("HannWindow", 1, hannwindow); + REGISTER_OPERATOR("HammingWindow", 1, hammingwindow); REGISTER_OPERATOR("Hardmax", 1, hardmax); REGISTER_OPERATOR("Hardmax", 13, hardmax); REGISTER_OPERATOR("HardSigmoid", 1, hard_sigmoid); diff --git a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt new file mode 100644 index 00000000000000..e34be9c222070e --- /dev/null +++ b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt @@ -0,0 +1,55 @@ +ir_version: 7 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "size" + output: "y" + op_type: "HammingWindow" + attribute { + name: "output_datatype" + i: 1 # Use 1 for f32 + type: INT + } + attribute { + name: "periodic" + i: 1 # Set to 1 for periodic, 0 for non-periodic + type: INT + } + } + name: "test_hammingwindow_periodic" + initializer { + dims: 1 + data_type: 7 # INT64 + int64_data: 10 + name: "size" + } + input { + name: "size" + type { + tensor_type { + elem_type: 7 # INT64 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 # FLOAT + shape { + dim { + dim_value: 10 # Modify this based on your expected output shape + } + } + } + } + } +} +opset_import { + version: 12 +} diff --git a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt new file mode 100644 index 00000000000000..9bb88aed431131 --- /dev/null +++ b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt @@ -0,0 +1,55 @@ +ir_version: 7 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "size" + output: "y" + op_type: "HammingWindow" + attribute { + name: "output_datatype" + 
i: 1 # Use 1 for f32 + type: INT + } + attribute { + name: "periodic" + i: 0 # Set to 0 for symmetric, 1 for periodic + type: INT + } + } + name: "test_hammingwindow_symmetric" + initializer { + dims: 1 + data_type: 7 # INT64 + int64_data: 10 + name: "size" + } + input { + name: "size" + type { + tensor_type { + elem_type: 7 # INT64 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 # FLOAT + shape { + dim { + dim_value: 10 # Modify this based on your expected output shape + } + } + } + } + } +} +opset_import { + version: 12 +} diff --git a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt new file mode 100644 index 00000000000000..bde433827feefd --- /dev/null +++ b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt @@ -0,0 +1,55 @@ +ir_version: 7 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "size" + output: "y" + op_type: "HannWindow" + attribute { + name: "output_datatype" + i: 1 # Use 1 for f32 + type: INT + } + attribute { + name: "periodic" + i: 1 # Set to 1 for periodic, 0 for non-periodic + type: INT + } + } + name: "test_hannwindow_periodic" + initializer { + dims: 1 + data_type: 7 # INT64 + int64_data: 10 + name: "size" + } + input { + name: "size" + type { + tensor_type { + elem_type: 7 # INT64 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 # FLOAT + shape { + dim { + dim_value: 10 # Modify this based on your expected output shape + } + } + } + } + } +} +opset_import { + version: 12 +} diff --git a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt new file mode 100644 index 00000000000000..725e6a199385a0 --- /dev/null +++ b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt @@ -0,0 +1,55 @@ +ir_version: 7 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "size" + output: "y" + op_type: "HannWindow" + attribute { + name: "output_datatype" + i: 1 # Use 1 for f32 + type: INT + } + attribute { + name: "periodic" + i: 0 # Set to 0 for symmetric, 1 for periodic + type: INT + } + } + name: "test_hannwindow_symmetric" + initializer { + dims: 1 + data_type: 7 # INT64 + int64_data: 10 + name: "size" + } + input { + name: "size" + type { + tensor_type { + elem_type: 7 # INT64 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 # FLOAT + shape { + dim { + dim_value: 10 # Modify this based on your expected output shape + } + } + } + } + } +} +opset_import { + version: 12 +} diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 840ed646dd4208..19d12396fa2022 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6706,7 +6706,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow) { auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, - "onnx/blackmanwindow.onnx")); + "onnx/blackmanwindow_periodic.onnx")); auto test_case = ov::test::TestCase(function, s_device); @@ -6715,3 +6715,45 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow) { test_case.run(); } + +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { + auto function = + 
onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/hammingwindow_periodic.onnx")); + + auto test_case = ov::test::TestCase(function, s_device); + + test_case.add_input({10}); + test_case.add_expected_output(Shape{10}, {0.0800f, 0.2533f, 0.6424f, 1.0000f, 0.6424f, 0.2533f, 0.0800f, 0.0000f, 0.0800f, 0.2533f}); + + test_case.run(); +} + +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) { + auto function = + onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/hannwindow_periodic.onnx")); + + auto test_case = ov::test::TestCase(function, s_device); + + test_case.add_input({10}); + test_case.add_expected_output(Shape{10}, {0.0000f, 0.1908f, 0.5f, 0.8092f, 1.0000f, 0.8092f, 0.5f, 0.1908f, 0.0000f, 0.0000f}); + + test_case.run(); +} + +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) { + auto function = + onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/hannwindow_symmetric.onnx")); + + auto test_case = ov::test::TestCase(function, s_device); + + test_case.add_input({10}); + test_case.add_expected_output(Shape{10}, {0.0000f, 0.1908f, 0.5f, 0.8092f, 1.0000f, 0.8092f, 0.5f, 0.1908f, 0.0000f, 0.0000f}); + + test_case.run(); +} From 70c5d923d09d72d16f701657d2188759dcd52535 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Sat, 26 Aug 2023 10:48:24 +0530 Subject: [PATCH 05/21] minor tests added --- src/frontends/onnx/tests/onnx_import.in.cpp | 32 ++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 19d12396fa2022..8eef8e3a85bff6 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6702,7 +6702,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unique_3d_with_duplicates_and_axis_2) test_case.run(); } -OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow) { +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) { auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, @@ -6716,6 +6716,21 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow) { test_case.run(); } +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) { + auto function = + onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/blackmanwindow_symmetric.onnx")); + + auto test_case = ov::test::TestCase(function, s_device); + + test_case.add_input({10}); + test_case.add_expected_output(Shape{10}, {0.0000f, 0.0509f, 0.2580f, 0.6300f, 0.9511f, 0.9511f, 0.6300f, 0.2580f, 0.0509f, 0.0000f}); + + test_case.run(); +} + + OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), @@ -6730,6 +6745,21 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { test_case.run(); } +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) { + auto function = + onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/hammingwindow_symmetric.onnx")); + + auto test_case = ov::test::TestCase(function, s_device); + + test_case.add_input({10}); + 
test_case.add_expected_output(Shape{10}, {0.0800f, 0.2533f, 0.6424f, 1.0000f, 0.6424f, 0.2533f, 0.0800f, 0.0000f, 0.0800f, 0.2533f}); + + test_case.run(); +} + + OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) { auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), From d1f763afc50ddb1b44ea8c1735cd15451d9daef2 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Sun, 3 Sep 2023 17:59:46 +0530 Subject: [PATCH 06/21] made reviewed changes --- .../onnx/frontend/src/op/blackmanwindow.cpp | 149 ++++++++++-------- .../onnx/frontend/src/op/blackmanwindow.hpp | 4 +- .../onnx/frontend/src/op/hammingwindow.cpp | 136 ++++++++-------- .../onnx/frontend/src/op/hammingwindow.hpp | 4 +- .../onnx/frontend/src/op/hannwindow.cpp | 134 ++++++++-------- .../onnx/frontend/src/op/hannwindow.hpp | 4 +- .../onnx/frontend/src/ops_bridge.cpp | 6 +- 7 files changed, 228 insertions(+), 209 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 2a47892f66ae21..1a87f659552b08 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -5,93 +5,104 @@ #include -#include "utils/common.hpp" #include "default_opset.hpp" +#include "utils/common.hpp" namespace ngraph { namespace onnx_import { namespace op { -namespace set_1 { +namespace set_17 { OutputVector blackmanwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = node.get_attribute_value("output_datatype", 1); + const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); - const int64_t a_0 = 0.42, a_1 = -0.5, a_2 = 0.08; const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); element::Type tensor_type; switch (output_datatype) { - case 1: - tensor_type = element::f32; - break; - case 2: - tensor_type = element::u8; - break; - case 3: - tensor_type = element::i8; - break; - case 4: - tensor_type = element::u16; - break; - case 5: - tensor_type = element::i16; - break; - case 6: - tensor_type = element::i32; - break; - case 7: - tensor_type = element::i64; - break; - case 10: - tensor_type = element::f16; - break; - case 11: - tensor_type = element::f64; - break; - case 12: - tensor_type = element::u32; - break; - case 13: - tensor_type = element::u64; - break; - case 16: - tensor_type = element::bf16; - break; - default: - throw std::runtime_error("Unsupported output data type."); + case element::Type_t::f32: + tensor_type = element::f32; + break; + case element::Type_t::u8: + tensor_type = element::u8; + break; + case element::Type_t::i8: + tensor_type = element::i8; + break; + case element::Type_t::u16: + tensor_type = element::u16; + break; + case element::Type_t::i16: + tensor_type = element::i16; + break; + case element::Type_t::i32: + tensor_type = element::i32; + break; + case element::Type_t::i64: + tensor_type = element::i64; + break; + case element::Type_t::f16: + tensor_type = element::f16; + break; + case element::Type_t::f64: + tensor_type = element::f64; + break; + case element::Type_t::u32: + tensor_type = element::u32; + break; + case element::Type_t::u64: + tensor_type = element::u64; + break; + case element::Type_t::bf16: + tensor_type = element::bf16; + break; + default: + throw std::runtime_error("Unsupported output 
data type."); } - if (periodic) { - const auto range = std::make_shared(tensor_type, size, 0, 1); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); - const auto factor_1 = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), size)); - const auto factor_2 = std::make_shared(range, std::make_shared(std::make_shared(pi, 4), size)); - const auto cos_1 = std::make_shared(factor_1); - const auto cos_2 = std::make_shared(factor_2); - const auto scaled_cos_1 = std::make_shared(cos_1, a_1); - const auto scaled_cos_2 = std::make_shared(cos_2, a_2); - const auto y_values = std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); - - return {output}; - } else { - const auto range = std::make_shared(tensor_type, size, 0, 1); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); - const auto factor_1 = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), std::make_shared(size, 1))); - const auto factor_2 = std::make_shared(range, std::make_shared(std::make_shared(pi, 4), std::make_shared(size, 1))); - const auto cos_1 = std::make_shared(factor_1); - const auto cos_2 = std::make_shared(factor_2); - const auto scaled_cos_1 = std::make_shared(cos_1, a_1); - const auto scaled_cos_2 = std::make_shared(cos_2, a_2); - const auto y_values = std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + // Weights as described in ONNX BlackManWindow docs + // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow + const auto a_0 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.42}); + const auto a_1 = std::make_shared(tensor_type, ov::Shape(), std::vector{-0.50}); + const auto a_2 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.08}); - return {output}; - } + const auto start = std::make_shared(tensor_type, ov::Shape(), std::vector{0.0}); + const auto step = std::make_shared(tensor_type, ov::Shape(), std::vector{1.0}); + const auto range = std::make_shared(start, size, step, tensor_type); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor_1 = std::make_shared( + range, + std::make_shared( + std::make_shared( + pi, + std::make_shared(tensor_type, ov::Shape(), std::vector{2})), + periodic ? size + : std::make_shared( + size, + std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + const auto factor_2 = std::make_shared( + range, + std::make_shared( + std::make_shared( + pi, + std::make_shared(tensor_type, ov::Shape(), std::vector{4})), + periodic ? 
size + : std::make_shared( + size, + std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + const auto cos_1 = std::make_shared(factor_1); + const auto cos_2 = std::make_shared(factor_2); + const auto scaled_cos_1 = std::make_shared(cos_1, a_1); + const auto scaled_cos_2 = std::make_shared(cos_2, a_2); + const auto y_values = + std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; } -} // namespace set_1 +} // namespace set_17 } // namespace op } // namespace onnx_import } // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp index 553cc8b838630a..5d5986c31749b0 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp @@ -9,11 +9,11 @@ namespace ngraph { namespace onnx_import { namespace op { -namespace set_1 { +namespace set_17 { OutputVector blackmanwindow(const Node& node); -} // namespace set_1 +} // namespace set_17 } // namespace op } // namespace onnx_import } // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index d3fc34b2463a32..4108fbb279e018 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -5,89 +5,95 @@ #include -#include "utils/common.hpp" #include "default_opset.hpp" +#include "utils/common.hpp" namespace ngraph { namespace onnx_import { namespace op { -namespace set_1 { +namespace set_17 { OutputVector hammingwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = node.get_attribute_value("output_datatype", 1); + const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); - const float a_0 = 25.0 / 46.0; - const float a_1 = 1.0 - a_0; - const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); element::Type tensor_type; switch (output_datatype) { - case 1: - tensor_type = element::f32; - break; - case 2: - tensor_type = element::u8; - break; - case 3: - tensor_type = element::i8; - break; - case 4: - tensor_type = element::u16; - break; - case 5: - tensor_type = element::i16; - break; - case 6: - tensor_type = element::i32; - break; - case 7: - tensor_type = element::i64; - break; - case 10: - tensor_type = element::f16; - break; - case 11: - tensor_type = element::f64; - break; - case 12: - tensor_type = element::u32; - break; - case 13: - tensor_type = element::u64; - break; - case 16: - tensor_type = element::bf16; - break; - default: - throw std::runtime_error("Unsupported output data type."); + case element::Type_t::f32: + tensor_type = element::f32; + break; + case element::Type_t::u8: + tensor_type = element::u8; + break; + case element::Type_t::i8: + tensor_type = element::i8; + break; + case element::Type_t::u16: + tensor_type = element::u16; + break; + case element::Type_t::i16: + tensor_type = element::i16; + break; + case element::Type_t::i32: + tensor_type = element::i32; + break; + case element::Type_t::i64: + tensor_type = element::i64; + break; + case element::Type_t::f16: + tensor_type = element::f16; + break; + case element::Type_t::f64: + 
tensor_type = element::f64; + break; + case element::Type_t::u32: + tensor_type = element::u32; + break; + case element::Type_t::u64: + tensor_type = element::u64; + break; + case element::Type_t::bf16: + tensor_type = element::bf16; + break; + default: + throw std::runtime_error("Unsupported output data type."); } - if (periodic) { - const auto range = std::make_shared(tensor_type, size, 0, 1); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); - const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), size)); - const auto cos = std::make_shared(factor); - const auto scaled_cos = std::make_shared(cos, a_1); - const auto y_values = std::make_shared(a_0, scaled_cos); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); - - return {output}; - } else { - const auto range = std::make_shared(tensor_type, size, 0, 1); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); - const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), std::make_shared(size, 1))); - const auto cos = std::make_shared(factor); - const auto scaled_cos = std::make_shared(cos, a_1); - const auto y_values = std::make_shared(a_0, scaled_cos); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + // Weights as described in ONNX BlackManWindow docs + // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow + const auto a_0 = std::make_shared( + std::make_shared(tensor_type, ov::Shape(), std::vector{25}), + std::make_shared(tensor_type, ov::Shape(), std::vector{46})); + const auto a_1 = std::make_shared( + std::make_shared(tensor_type, ov::Shape(), std::vector{1}), + a_0); - return {output}; - } + const auto start = std::make_shared(tensor_type, ov::Shape(), std::vector{0.0}); + const auto step = std::make_shared(tensor_type, ov::Shape(), std::vector{1.0}); + const auto range = std::make_shared(start, size, step, tensor_type); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor = std::make_shared( + range, + std::make_shared( + std::make_shared( + pi, + std::make_shared(tensor_type, ov::Shape(), std::vector{2})), + periodic ? 
size + : std::make_shared( + size, + std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + + const auto cos = std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; } -} // namespace set_1 +} // namespace set_17 } // namespace op } // namespace onnx_import } // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp index 106587949a5f95..2675beb190e8a6 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp @@ -9,11 +9,11 @@ namespace ngraph { namespace onnx_import { namespace op { -namespace set_1 { +namespace set_17 { OutputVector hammingwindow(const Node& node); -} // namespace set_1 +} // namespace set_17 } // namespace op } // namespace onnx_import } // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index b463fe214337da..4abab6a9f85f69 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -5,89 +5,91 @@ #include -#include "utils/common.hpp" #include "default_opset.hpp" +#include "utils/common.hpp" namespace ngraph { namespace onnx_import { namespace op { -namespace set_1 { +namespace set_17 { OutputVector hannwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = node.get_attribute_value("output_datatype", 1); + const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); - const float a_0 = 0.5; - const float a_1 = 0.5; - const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); element::Type tensor_type; switch (output_datatype) { - case 1: - tensor_type = element::f32; - break; - case 2: - tensor_type = element::u8; - break; - case 3: - tensor_type = element::i8; - break; - case 4: - tensor_type = element::u16; - break; - case 5: - tensor_type = element::i16; - break; - case 6: - tensor_type = element::i32; - break; - case 7: - tensor_type = element::i64; - break; - case 10: - tensor_type = element::f16; - break; - case 11: - tensor_type = element::f64; - break; - case 12: - tensor_type = element::u32; - break; - case 13: - tensor_type = element::u64; - break; - case 16: - tensor_type = element::bf16; - break; - default: - throw std::runtime_error("Unsupported output data type."); + case element::Type_t::f32: + tensor_type = element::f32; + break; + case element::Type_t::u8: + tensor_type = element::u8; + break; + case element::Type_t::i8: + tensor_type = element::i8; + break; + case element::Type_t::u16: + tensor_type = element::u16; + break; + case element::Type_t::i16: + tensor_type = element::i16; + break; + case element::Type_t::i32: + tensor_type = element::i32; + break; + case element::Type_t::i64: + tensor_type = element::i64; + break; + case element::Type_t::f16: + tensor_type = element::f16; + break; + case element::Type_t::f64: + tensor_type = element::f64; + break; + case element::Type_t::u32: + tensor_type = element::u32; + break; + case element::Type_t::u64: + tensor_type = element::u64; + break; + case 
element::Type_t::bf16: + tensor_type = element::bf16; + break; + default: + throw std::runtime_error("Unsupported output data type."); } - if (periodic) { - const auto range = std::make_shared(tensor_type, size, 0, 1); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); - const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), size)); - const auto cos = std::make_shared(factor); - const auto scaled_cos = std::make_shared(cos, a_1); - const auto y_values = std::make_shared(a_0, scaled_cos); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); - - return {output}; - } else { - const auto range = std::make_shared(tensor_type, size, 0, 1); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); - const auto factor = std::make_shared(range, std::make_shared(std::make_shared(pi, 2), std::make_shared(size, 1))); - const auto cos = std::make_shared(factor); - const auto scaled_cos = std::make_shared(cos, a_1); - const auto y_values = std::make_shared(a_0, scaled_cos); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + // Weights as described in ONNX BlackManWindow docs + // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow + const auto a_0 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.5}); + const auto a_1 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.5}); - return {output}; - } + const auto start = std::make_shared(tensor_type, ov::Shape(), std::vector{0.0}); + const auto step = std::make_shared(tensor_type, ov::Shape(), std::vector{1.0}); + const auto range = std::make_shared(start, size, step, tensor_type); + const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto factor = std::make_shared( + range, + std::make_shared( + std::make_shared( + pi, + std::make_shared(tensor_type, ov::Shape(), std::vector{2})), + periodic ? 
size + : std::make_shared( + size, + std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + + const auto cos = std::make_shared(factor); + const auto scaled_cos = std::make_shared(cos, a_1); + const auto y_values = std::make_shared(a_0, scaled_cos); + const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); + + return {output}; } -} // namespace set_1 +} // namespace set_17 } // namespace op } // namespace onnx_import -} // namespace ngraph +} // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp index f36b8be505d611..8c2332498cac94 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp @@ -9,11 +9,11 @@ namespace ngraph { namespace onnx_import { namespace op { -namespace set_1 { +namespace set_17 { OutputVector hannwindow(const Node& node); -} // namespace set_1 +} // namespace set_17 } // namespace op } // namespace onnx_import } // namespace ngraph \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index 498e8b14ca8610..f16d6731daebf6 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -346,7 +346,7 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("BatchNormalization", 1, batch_norm); REGISTER_OPERATOR("BatchNormalization", 7, batch_norm); REGISTER_OPERATOR("BitShift", 1, bitshift); - REGISTER_OPERATOR("BlackManWindow", 1, blackmanwindow); + REGISTER_OPERATOR("BlackManWindow", 17, blackmanwindow); REGISTER_OPERATOR("Cast", 1, cast); REGISTER_OPERATOR("CastLike", 1, cast_like); REGISTER_OPERATOR("Ceil", 1, ceil); @@ -394,8 +394,8 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("Greater", 1, greater); REGISTER_OPERATOR("GridSample", 1, grid_sample); REGISTER_OPERATOR("GRU", 1, gru); - REGISTER_OPERATOR("HannWindow", 1, hannwindow); - REGISTER_OPERATOR("HammingWindow", 1, hammingwindow); + REGISTER_OPERATOR("HannWindow", 17, hannwindow); + REGISTER_OPERATOR("HammingWindow", 17, hammingwindow); REGISTER_OPERATOR("Hardmax", 1, hardmax); REGISTER_OPERATOR("Hardmax", 13, hardmax); REGISTER_OPERATOR("HardSigmoid", 1, hard_sigmoid); From 10b0a8c438c2134be12e348713f50368028fccd6 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Wed, 6 Sep 2023 20:32:45 +0530 Subject: [PATCH 07/21] made reviewed changes used output_datatype directly, returned y_values directly --- .../onnx/frontend/src/op/blackmanwindow.cpp | 67 ++++--------------- .../onnx/frontend/src/op/hammingwindow.cpp | 63 +++-------------- .../onnx/frontend/src/op/hannwindow.cpp | 61 +++-------------- 3 files changed, 31 insertions(+), 160 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 1a87f659552b08..4e36c174993f7c 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -20,87 +20,44 @@ OutputVector blackmanwindow(const Node& node) { const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); - element::Type tensor_type; - switch (output_datatype) { - case element::Type_t::f32: - tensor_type = element::f32; - break; - case element::Type_t::u8: - tensor_type = element::u8; - break; - case element::Type_t::i8: - tensor_type = element::i8; - break; - case 
element::Type_t::u16: - tensor_type = element::u16; - break; - case element::Type_t::i16: - tensor_type = element::i16; - break; - case element::Type_t::i32: - tensor_type = element::i32; - break; - case element::Type_t::i64: - tensor_type = element::i64; - break; - case element::Type_t::f16: - tensor_type = element::f16; - break; - case element::Type_t::f64: - tensor_type = element::f64; - break; - case element::Type_t::u32: - tensor_type = element::u32; - break; - case element::Type_t::u64: - tensor_type = element::u64; - break; - case element::Type_t::bf16: - tensor_type = element::bf16; - break; - default: - throw std::runtime_error("Unsupported output data type."); - } - // Weights as described in ONNX BlackManWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow - const auto a_0 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.42}); - const auto a_1 = std::make_shared(tensor_type, ov::Shape(), std::vector{-0.50}); - const auto a_2 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.08}); + const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.42}); + const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{-0.50}); + const auto a_2 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.08}); - const auto start = std::make_shared(tensor_type, ov::Shape(), std::vector{0.0}); - const auto step = std::make_shared(tensor_type, ov::Shape(), std::vector{1.0}); - const auto range = std::make_shared(start, size, step, tensor_type); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0}); + const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0}); + const auto range = std::make_shared(start, size, step, output_datatype); + const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); const auto factor_1 = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(tensor_type, ov::Shape(), std::vector{2})), + std::make_shared(output_datatype, ov::Shape(), std::vector{2})), periodic ? size : std::make_shared( size, - std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto factor_2 = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(tensor_type, ov::Shape(), std::vector{4})), + std::make_shared(output_datatype, ov::Shape(), std::vector{4})), periodic ? 
size : std::make_shared( size, - std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto cos_1 = std::make_shared(factor_1); const auto cos_2 = std::make_shared(factor_2); const auto scaled_cos_1 = std::make_shared(cos_1, a_1); const auto scaled_cos_2 = std::make_shared(cos_2, a_2); const auto y_values = std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); - return {output}; + return {y_values}; } } // namespace set_17 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 4108fbb279e018..818fce73050942 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -20,78 +20,35 @@ OutputVector hammingwindow(const Node& node) { const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); - element::Type tensor_type; - switch (output_datatype) { - case element::Type_t::f32: - tensor_type = element::f32; - break; - case element::Type_t::u8: - tensor_type = element::u8; - break; - case element::Type_t::i8: - tensor_type = element::i8; - break; - case element::Type_t::u16: - tensor_type = element::u16; - break; - case element::Type_t::i16: - tensor_type = element::i16; - break; - case element::Type_t::i32: - tensor_type = element::i32; - break; - case element::Type_t::i64: - tensor_type = element::i64; - break; - case element::Type_t::f16: - tensor_type = element::f16; - break; - case element::Type_t::f64: - tensor_type = element::f64; - break; - case element::Type_t::u32: - tensor_type = element::u32; - break; - case element::Type_t::u64: - tensor_type = element::u64; - break; - case element::Type_t::bf16: - tensor_type = element::bf16; - break; - default: - throw std::runtime_error("Unsupported output data type."); - } - // Weights as described in ONNX BlackManWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow const auto a_0 = std::make_shared( - std::make_shared(tensor_type, ov::Shape(), std::vector{25}), - std::make_shared(tensor_type, ov::Shape(), std::vector{46})); + std::make_shared(output_datatype, ov::Shape(), std::vector{25}), + std::make_shared(output_datatype, ov::Shape(), std::vector{46})); const auto a_1 = std::make_shared( - std::make_shared(tensor_type, ov::Shape(), std::vector{1}), + std::make_shared(output_datatype, ov::Shape(), std::vector{1}), a_0); - const auto start = std::make_shared(tensor_type, ov::Shape(), std::vector{0.0}); - const auto step = std::make_shared(tensor_type, ov::Shape(), std::vector{1.0}); - const auto range = std::make_shared(start, size, step, tensor_type); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0}); + const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0}); + const auto range = std::make_shared(start, size, step, output_datatype); + const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(tensor_type, ov::Shape(), std::vector{2})), + std::make_shared(output_datatype, ov::Shape(), std::vector{2})), periodic ? 
size : std::make_shared( size, - std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); const auto y_values = std::make_shared(a_0, scaled_cos); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); - return {output}; + return {y_values}; } } // namespace set_17 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 4abab6a9f85f69..2b43cfcf157e2c 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -20,74 +20,31 @@ OutputVector hannwindow(const Node& node) { const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); - element::Type tensor_type; - switch (output_datatype) { - case element::Type_t::f32: - tensor_type = element::f32; - break; - case element::Type_t::u8: - tensor_type = element::u8; - break; - case element::Type_t::i8: - tensor_type = element::i8; - break; - case element::Type_t::u16: - tensor_type = element::u16; - break; - case element::Type_t::i16: - tensor_type = element::i16; - break; - case element::Type_t::i32: - tensor_type = element::i32; - break; - case element::Type_t::i64: - tensor_type = element::i64; - break; - case element::Type_t::f16: - tensor_type = element::f16; - break; - case element::Type_t::f64: - tensor_type = element::f64; - break; - case element::Type_t::u32: - tensor_type = element::u32; - break; - case element::Type_t::u64: - tensor_type = element::u64; - break; - case element::Type_t::bf16: - tensor_type = element::bf16; - break; - default: - throw std::runtime_error("Unsupported output data type."); - } - // Weights as described in ONNX BlackManWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow - const auto a_0 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.5}); - const auto a_1 = std::make_shared(tensor_type, ov::Shape(), std::vector{0.5}); + const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5}); + const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5}); - const auto start = std::make_shared(tensor_type, ov::Shape(), std::vector{0.0}); - const auto step = std::make_shared(tensor_type, ov::Shape(), std::vector{1.0}); - const auto range = std::make_shared(start, size, step, tensor_type); - const auto pi = default_opset::Constant::create(tensor_type, ov::Shape(), {static_cast(M_PI)}); + const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0}); + const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0}); + const auto range = std::make_shared(start, size, step, output_datatype); + const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(tensor_type, ov::Shape(), std::vector{2})), + std::make_shared(output_datatype, ov::Shape(), std::vector{2})), periodic ? 
size : std::make_shared( size, - std::make_shared(tensor_type, ov::Shape(), std::vector{1})))); + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); const auto y_values = std::make_shared(a_0, scaled_cos); - const auto output = std::make_shared(tensor_type, ov::Shape(axis_lengths), y_values); - return {output}; + return {y_values}; } } // namespace set_17 } // namespace op From 997d07abf5b6f7513867b4af215acee17d5e257c Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Wed, 6 Sep 2023 20:35:26 +0530 Subject: [PATCH 08/21] fixed clang-format --- .../onnx/frontend/src/op/blackmanwindow.cpp | 18 ++++++++++-------- .../onnx/frontend/src/op/hammingwindow.cpp | 9 +++++---- .../onnx/frontend/src/op/hannwindow.cpp | 9 +++++---- 3 files changed, 20 insertions(+), 16 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 4e36c174993f7c..603602cf2f404d 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -36,20 +36,22 @@ OutputVector blackmanwindow(const Node& node) { std::make_shared( pi, std::make_shared(output_datatype, ov::Shape(), std::vector{2})), - periodic ? size - : std::make_shared( - size, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + periodic + ? size + : std::make_shared( + size, + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto factor_2 = std::make_shared( range, std::make_shared( std::make_shared( pi, std::make_shared(output_datatype, ov::Shape(), std::vector{4})), - periodic ? size - : std::make_shared( - size, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + periodic + ? size + : std::make_shared( + size, + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto cos_1 = std::make_shared(factor_1); const auto cos_2 = std::make_shared(factor_2); const auto scaled_cos_1 = std::make_shared(cos_1, a_1); diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 818fce73050942..a7d0406d67e3fc 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -39,10 +39,11 @@ OutputVector hammingwindow(const Node& node) { std::make_shared( pi, std::make_shared(output_datatype, ov::Shape(), std::vector{2})), - periodic ? size - : std::make_shared( - size, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + periodic + ? size + : std::make_shared( + size, + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 2b43cfcf157e2c..dd2ee693619604 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -35,10 +35,11 @@ OutputVector hannwindow(const Node& node) { std::make_shared( pi, std::make_shared(output_datatype, ov::Shape(), std::vector{2})), - periodic ? size - : std::make_shared( - size, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + periodic + ? 
size + : std::make_shared( + size, + std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); From 372d9317790a0180fadc0fd5f802f9f25d6e2aef Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Thu, 21 Sep 2023 20:08:33 +0530 Subject: [PATCH 09/21] add OPENVINO_SUPPRESS_DEPRECATED_START --- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 4 +++- src/frontends/onnx/frontend/src/op/blackmanwindow.hpp | 4 +++- src/frontends/onnx/frontend/src/op/hammingwindow.cpp | 4 +++- src/frontends/onnx/frontend/src/op/hammingwindow.hpp | 4 +++- src/frontends/onnx/frontend/src/op/hannwindow.cpp | 4 +++- src/frontends/onnx/frontend/src/op/hannwindow.hpp | 4 +++- 6 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 603602cf2f404d..efe48b8bb005a3 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -8,6 +8,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { @@ -64,4 +65,5 @@ OutputVector blackmanwindow(const Node& node) { } // namespace set_17 } // namespace op } // namespace onnx_import -} // namespace ngraph \ No newline at end of file +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp index 5d5986c31749b0..cfc0bed830f005 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp @@ -6,6 +6,7 @@ #include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { @@ -16,4 +17,5 @@ OutputVector blackmanwindow(const Node& node); } // namespace set_17 } // namespace op } // namespace onnx_import -} // namespace ngraph \ No newline at end of file +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index a7d0406d67e3fc..a62dd75cd8de76 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -8,6 +8,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { @@ -54,4 +55,5 @@ OutputVector hammingwindow(const Node& node) { } // namespace set_17 } // namespace op } // namespace onnx_import -} // namespace ngraph \ No newline at end of file +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp index 2675beb190e8a6..032101fe0a4e42 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp @@ -6,6 +6,7 @@ #include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { @@ -16,4 +17,5 @@ OutputVector hammingwindow(const Node& node); } // namespace set_17 } // namespace op } // 
namespace onnx_import -} // namespace ngraph \ No newline at end of file +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index dd2ee693619604..35ad91be13b7df 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -8,6 +8,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { @@ -50,4 +51,5 @@ OutputVector hannwindow(const Node& node) { } // namespace set_17 } // namespace op } // namespace onnx_import -} // namespace ngraph \ No newline at end of file +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp index 8c2332498cac94..0cf7538ee7713f 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp @@ -6,6 +6,7 @@ #include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { @@ -16,4 +17,5 @@ OutputVector hannwindow(const Node& node); } // namespace set_17 } // namespace op } // namespace onnx_import -} // namespace ngraph \ No newline at end of file +} // namespace ngraph +OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file From e5237a6e18fc3820eb4312feab0e1106ff890290 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Fri, 22 Sep 2023 17:29:43 +0530 Subject: [PATCH 10/21] include math.h --- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 1 + src/frontends/onnx/frontend/src/op/hammingwindow.cpp | 1 + src/frontends/onnx/frontend/src/op/hannwindow.cpp | 1 + 3 files changed, 3 insertions(+) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index efe48b8bb005a3..38c130e108086c 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -7,6 +7,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" +#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index a62dd75cd8de76..d4e5b064294a9d 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -7,6 +7,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" +#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 35ad91be13b7df..101c391f76b898 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -7,6 +7,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" +#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { From 80bf2fcdba1be1dd386806fb1480e7c1bbdfd729 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Mon, 25 Sep 2023 22:24:51 +0530 Subject: [PATCH 11/21] float fix --- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 11 ++++++----- src/frontends/onnx/frontend/src/op/hammingwindow.cpp | 11 ++++++----- 
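A note on the M_PI usage these commits depend on: on MSVC, _USE_MATH_DEFINES has to be defined before the math header is first included for M_PI to exist, which is presumably why a later commit in the series moves the #define above the #include. A minimal standalone sketch of that pattern (independent of the frontend sources; the fallback constant is a defensive assumption, not something the patches add):

#define _USE_MATH_DEFINES  // must precede the math header on MSVC for M_PI to be defined
#include <cmath>
#include <cstdio>

#ifndef M_PI  // defensive fallback for toolchains that still do not expose it
#define M_PI 3.14159265358979323846
#endif

int main() {
    // The window ops in these patches all scale this constant by 2/N or 4/N.
    std::printf("pi used by the window ops: %.17g\n", static_cast<double>(M_PI));
    return 0;
}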
src/frontends/onnx/frontend/src/op/hannwindow.cpp | 9 +++++---- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 38c130e108086c..9e0a6e3426dd9d 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -8,6 +8,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" #include +#define _USE_MATH_DEFINES OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -24,12 +25,12 @@ OutputVector blackmanwindow(const Node& node) { // Weights as described in ONNX BlackManWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow - const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.42}); - const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{-0.50}); - const auto a_2 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.08}); + const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.42f}); + const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{-0.50f}); + const auto a_2 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.08f}); - const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0}); - const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0}); + const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); + const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); const auto factor_1 = std::make_shared( diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index d4e5b064294a9d..50b1f6569c655d 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -8,6 +8,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" #include +#define _USE_MATH_DEFINES OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -25,14 +26,14 @@ OutputVector hammingwindow(const Node& node) { // Weights as described in ONNX BlackManWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow const auto a_0 = std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{25}), - std::make_shared(output_datatype, ov::Shape(), std::vector{46})); + std::make_shared(output_datatype, ov::Shape(), std::vector{25.0f}), + std::make_shared(output_datatype, ov::Shape(), std::vector{46.0f})); const auto a_1 = std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{1}), + std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}), a_0); - const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0}); - const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0}); + const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); + const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); const auto factor = std::make_shared( diff --git 
a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 101c391f76b898..596aa4dc7796ea 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -8,6 +8,7 @@ #include "default_opset.hpp" #include "utils/common.hpp" #include +#define _USE_MATH_DEFINES OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -24,11 +25,11 @@ OutputVector hannwindow(const Node& node) { // Weights as described in ONNX BlackManWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow - const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5}); - const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5}); + const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5f}); + const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5f}); - const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0}); - const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0}); + const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); + const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); const auto factor = std::make_shared( From bf359ece6b2b6bcb65d5fd6d9ca2edb989d313cc Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Mon, 2 Oct 2023 17:19:51 +0530 Subject: [PATCH 12/21] fix --- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 4 ++-- src/frontends/onnx/frontend/src/op/blackmanwindow.hpp | 4 +++- src/frontends/onnx/frontend/src/op/hammingwindow.cpp | 4 ++-- src/frontends/onnx/frontend/src/op/hammingwindow.hpp | 4 +++- src/frontends/onnx/frontend/src/op/hannwindow.cpp | 4 ++-- src/frontends/onnx/frontend/src/op/hannwindow.hpp | 4 +++- 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 9e0a6e3426dd9d..b7f60fa07c59db 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -7,8 +7,8 @@ #include "default_opset.hpp" #include "utils/common.hpp" -#include #define _USE_MATH_DEFINES +#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -17,7 +17,7 @@ namespace op { namespace set_17 { OutputVector blackmanwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); const ov::PartialShape shape = size.get_partial_shape(); diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp index cfc0bed830f005..0c077a7baab73c 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp @@ -3,10 +3,12 @@ #pragma once +#include "openvino/core/deprecated.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START + #include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { 
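For a numeric cross-check of the Hann graph being assembled here, w[n] = 0.5 - 0.5*cos(2*pi*n/N) with N = size when periodic and N = size - 1 otherwise, a standalone reference in plain C++ (hann_reference and kPi are illustrative names, not frontend code):

#include <cmath>
#include <cstdio>
#include <vector>

// Reference Hann window matching the a_0 = a_1 = 0.5f constants in hannwindow.cpp.
// kPi mirrors the M_PI constant the graph folds in.
std::vector<float> hann_reference(int size, bool periodic) {
    constexpr float kPi = 3.14159265358979323846f;
    const float den = periodic ? static_cast<float>(size)       // N = size
                               : static_cast<float>(size - 1);  // N = size - 1 (size > 1 assumed)
    std::vector<float> w(size);
    for (int n = 0; n < size; ++n)
        w[n] = 0.5f - 0.5f * std::cos(2.0f * kPi * static_cast<float>(n) / den);
    return w;
}

int main() {
    for (float v : hann_reference(10, /*periodic=*/true))
        std::printf("%f ", v);
    std::printf("\n");
    return 0;
}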
namespace onnx_import { namespace op { diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 50b1f6569c655d..e364c2dda9c296 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -7,8 +7,8 @@ #include "default_opset.hpp" #include "utils/common.hpp" -#include #define _USE_MATH_DEFINES +#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -17,7 +17,7 @@ namespace op { namespace set_17 { OutputVector hammingwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); const ov::PartialShape shape = size.get_partial_shape(); diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp index 032101fe0a4e42..f5c030c4fbf6fa 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp @@ -3,10 +3,12 @@ #pragma once +#include "openvino/core/deprecated.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START + #include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 596aa4dc7796ea..2ba8f6cbf92413 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -7,8 +7,8 @@ #include "default_opset.hpp" #include "utils/common.hpp" -#include #define _USE_MATH_DEFINES +#include OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -17,7 +17,7 @@ namespace op { namespace set_17 { OutputVector hannwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); const ov::PartialShape shape = size.get_partial_shape(); diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp index 0cf7538ee7713f..ceb72d7369357b 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp @@ -3,10 +3,12 @@ #pragma once +#include "openvino/core/deprecated.hpp" +OPENVINO_SUPPRESS_DEPRECATED_START + #include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { From 8349725f02bfc4386c86cb85dd38b1f00fcbd376 Mon Sep 17 00:00:00 2001 From: Siddhant Chauhan Date: Tue, 3 Oct 2023 10:36:06 +0000 Subject: [PATCH 13/21] fix namespace to set_1 --- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 4 ++-- src/frontends/onnx/frontend/src/op/blackmanwindow.hpp | 4 ++-- src/frontends/onnx/frontend/src/op/hammingwindow.cpp | 4 ++-- src/frontends/onnx/frontend/src/op/hammingwindow.hpp | 4 ++-- src/frontends/onnx/frontend/src/op/hannwindow.cpp | 4 ++-- src/frontends/onnx/frontend/src/op/hannwindow.hpp | 4 ++-- 
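The Hamming graph uses the exact ONNX coefficients a_0 = 25/46 and a_1 = 1 - a_0 rather than the rounded 0.54/0.46, giving w[n] = a_0 - a_1*cos(2*pi*n/N); a standalone reference for comparison (hamming_reference is an illustrative name, not part of the patch):

#include <cmath>
#include <cstdio>
#include <vector>

// Reference Hamming window with the exact 25/46 split built by hammingwindow.cpp.
std::vector<float> hamming_reference(int size, bool periodic) {
    constexpr float kPi = 3.14159265358979323846f;
    const float a0 = 25.0f / 46.0f;   // ~0.5435, the ONNX definition
    const float a1 = 1.0f - a0;
    const float den = periodic ? static_cast<float>(size)       // N = size
                               : static_cast<float>(size - 1);  // N = size - 1 (size > 1 assumed)
    std::vector<float> w(size);
    for (int n = 0; n < size; ++n)
        w[n] = a0 - a1 * std::cos(2.0f * kPi * static_cast<float>(n) / den);
    return w;
}

int main() {
    for (float v : hamming_reference(10, /*periodic=*/false))
        std::printf("%f ", v);
    std::printf("\n");
    return 0;
}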
src/frontends/onnx/frontend/src/ops_bridge.cpp | 6 +++--- .../onnx/tests/models/blackmanwindow_periodic.prototxt | 2 +- .../onnx/tests/models/blackmanwindow_symmetric.prototxt | 2 +- .../onnx/tests/models/hammingwindow_periodic.prototxt | 2 +- .../onnx/tests/models/hammingwindow_symmetric.prototxt | 2 +- .../onnx/tests/models/hannwindow_periodic.prototxt | 2 +- .../onnx/tests/models/hannwindow_symmetric.prototxt | 2 +- 13 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index b7f60fa07c59db..138c60fccc85d8 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -14,7 +14,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { -namespace set_17 { +namespace set_1 { OutputVector blackmanwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); @@ -64,7 +64,7 @@ OutputVector blackmanwindow(const Node& node) { return {y_values}; } -} // namespace set_17 +} // namespace set_1 } // namespace op } // namespace onnx_import } // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp index 0c077a7baab73c..ccff09c84817af 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp @@ -12,11 +12,11 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { -namespace set_17 { +namespace set_1 { OutputVector blackmanwindow(const Node& node); -} // namespace set_17 +} // namespace set_1 } // namespace op } // namespace onnx_import } // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index e364c2dda9c296..4fdbe67f6acdc2 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -14,7 +14,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { -namespace set_17 { +namespace set_1 { OutputVector hammingwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); @@ -54,7 +54,7 @@ OutputVector hammingwindow(const Node& node) { return {y_values}; } -} // namespace set_17 +} // namespace set_1 } // namespace op } // namespace onnx_import } // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp index f5c030c4fbf6fa..d088b4105abc3a 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp @@ -12,11 +12,11 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { -namespace set_17 { +namespace set_1 { OutputVector hammingwindow(const Node& node); -} // namespace set_17 +} // namespace set_1 } // namespace op } // namespace onnx_import } // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 2ba8f6cbf92413..e7f6b65a2109ae 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp 
+++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -14,7 +14,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { -namespace set_17 { +namespace set_1 { OutputVector hannwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); @@ -50,7 +50,7 @@ OutputVector hannwindow(const Node& node) { return {y_values}; } -} // namespace set_17 +} // namespace set_1 } // namespace op } // namespace onnx_import } // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp index ceb72d7369357b..0c9e6993048ef3 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp @@ -12,11 +12,11 @@ OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace op { -namespace set_17 { +namespace set_1 { OutputVector hannwindow(const Node& node); -} // namespace set_17 +} // namespace set_1 } // namespace op } // namespace onnx_import } // namespace ngraph diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index 8beb9753657d5c..0511696b3451ae 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -348,7 +348,7 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("BatchNormalization", 1, batch_norm); REGISTER_OPERATOR("BatchNormalization", 7, batch_norm); REGISTER_OPERATOR("BitShift", 1, bitshift); - REGISTER_OPERATOR("BlackManWindow", 17, blackmanwindow); + REGISTER_OPERATOR("BlackManWindow", 1, blackmanwindow); REGISTER_OPERATOR("Cast", 1, cast); REGISTER_OPERATOR("CastLike", 1, cast_like); REGISTER_OPERATOR("Ceil", 1, ceil); @@ -396,8 +396,8 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("Greater", 1, greater); REGISTER_OPERATOR("GridSample", 1, grid_sample); REGISTER_OPERATOR("GRU", 1, gru); - REGISTER_OPERATOR("HannWindow", 17, hannwindow); - REGISTER_OPERATOR("HammingWindow", 17, hammingwindow); + REGISTER_OPERATOR("HannWindow", 1, hannwindow); + REGISTER_OPERATOR("HammingWindow", 1, hammingwindow); REGISTER_OPERATOR("Hardmax", 1, hardmax); REGISTER_OPERATOR("Hardmax", 13, hardmax); REGISTER_OPERATOR("HardSigmoid", 1, hard_sigmoid); diff --git a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt index 8f24affd4f403a..db7f084b8970a1 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt @@ -51,5 +51,5 @@ graph { } } opset_import { - version: 12 + version: 1 } diff --git a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt index a46187a448923a..388ed513933785 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt @@ -51,5 +51,5 @@ graph { } } opset_import { - version: 12 + version: 1 } diff --git a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt index e34be9c222070e..db5a3bd341b3ce 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt +++ 
b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt @@ -51,5 +51,5 @@ graph { } } opset_import { - version: 12 + version: 1 } diff --git a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt index 9bb88aed431131..5dcb11fd046038 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt @@ -51,5 +51,5 @@ graph { } } opset_import { - version: 12 + version: 1 } diff --git a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt index bde433827feefd..a8c6579eb6e4aa 100644 --- a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt @@ -51,5 +51,5 @@ graph { } } opset_import { - version: 12 + version: 1 } diff --git a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt index 725e6a199385a0..27a83a0d13de08 100644 --- a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt @@ -51,5 +51,5 @@ graph { } } opset_import { - version: 12 + version: 1 } From e2287c18fd8e233aba32619da7caeb0fd4490ef3 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Wed, 4 Oct 2023 10:23:10 +0530 Subject: [PATCH 14/21] test fixes --- src/frontends/onnx/frontend/src/ops_bridge.cpp | 2 +- .../onnx/tests/models/blackmanwindow_periodic.prototxt | 7 ++----- .../onnx/tests/models/blackmanwindow_symmetric.prototxt | 7 ++----- .../onnx/tests/models/hammingwindow_periodic.prototxt | 7 ++----- .../onnx/tests/models/hammingwindow_symmetric.prototxt | 7 ++----- .../onnx/tests/models/hannwindow_periodic.prototxt | 7 ++----- .../onnx/tests/models/hannwindow_symmetric.prototxt | 7 ++----- 7 files changed, 13 insertions(+), 31 deletions(-) diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index 0511696b3451ae..c4d9a50c4ca637 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -348,7 +348,7 @@ OperatorsBridge::OperatorsBridge() { REGISTER_OPERATOR("BatchNormalization", 1, batch_norm); REGISTER_OPERATOR("BatchNormalization", 7, batch_norm); REGISTER_OPERATOR("BitShift", 1, bitshift); - REGISTER_OPERATOR("BlackManWindow", 1, blackmanwindow); + REGISTER_OPERATOR("BlackmanWindow", 1, blackmanwindow); REGISTER_OPERATOR("Cast", 1, cast); REGISTER_OPERATOR("CastLike", 1, cast_like); REGISTER_OPERATOR("Ceil", 1, ceil); diff --git a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt index db7f084b8970a1..522679385a734e 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt @@ -18,7 +18,6 @@ graph { } name: "test_blackmanwindow_periodic" initializer { - dims: 1 data_type: 7 # INT64 int64_data: 10 name: "size" @@ -29,9 +28,7 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } + } } } @@ -51,5 +48,5 @@ graph { } } opset_import { - version: 1 + version: 17 } diff --git a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt index 388ed513933785..a40aff00745b02 100644 --- 
a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt @@ -18,7 +18,6 @@ graph { } name: "test_blackmanwindow_symmetric" initializer { - dims: 1 data_type: 7 # INT64 int64_data: 10 name: "size" @@ -29,9 +28,7 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } + } } } @@ -51,5 +48,5 @@ graph { } } opset_import { - version: 1 + version: 17 } diff --git a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt index db5a3bd341b3ce..fffa39a245aab1 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt @@ -18,7 +18,6 @@ graph { } name: "test_hammingwindow_periodic" initializer { - dims: 1 data_type: 7 # INT64 int64_data: 10 name: "size" @@ -29,9 +28,7 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } + } } } @@ -51,5 +48,5 @@ graph { } } opset_import { - version: 1 + version: 17 } diff --git a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt index 5dcb11fd046038..b4819935370892 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt @@ -18,7 +18,6 @@ graph { } name: "test_hammingwindow_symmetric" initializer { - dims: 1 data_type: 7 # INT64 int64_data: 10 name: "size" @@ -29,9 +28,7 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } + } } } @@ -51,5 +48,5 @@ graph { } } opset_import { - version: 1 + version: 17 } diff --git a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt index a8c6579eb6e4aa..b74249d8005813 100644 --- a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt @@ -18,7 +18,6 @@ graph { } name: "test_hannwindow_periodic" initializer { - dims: 1 data_type: 7 # INT64 int64_data: 10 name: "size" @@ -29,9 +28,7 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } + } } } @@ -51,5 +48,5 @@ graph { } } opset_import { - version: 1 + version: 17 } diff --git a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt index 27a83a0d13de08..b86ecb1520e48f 100644 --- a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt @@ -18,7 +18,6 @@ graph { } name: "test_hannwindow_symmetric" initializer { - dims: 1 data_type: 7 # INT64 int64_data: 10 name: "size" @@ -29,9 +28,7 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } + } } } @@ -51,5 +48,5 @@ graph { } } opset_import { - version: 1 + version: 17 } From cf486ce60dd9575e1ae5251a5dd960b374e7367b Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Fri, 6 Oct 2023 22:32:18 +0530 Subject: [PATCH 15/21] fix cast to output_datatype --- .../onnx/frontend/src/op/blackmanwindow.cpp | 29 ++++++++++++------- .../onnx/frontend/src/op/hammingwindow.cpp | 21 +++++++++----- .../onnx/frontend/src/op/hannwindow.cpp | 23 ++++++++++----- 3 files changed, 48 insertions(+), 25 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp 
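The Blackman hunk that follows builds w[n] = 0.42 - 0.5*cos(2*pi*n/N) + 0.08*cos(4*pi*n/N), again with N = size for periodic and N = size - 1 for symmetric; a standalone reference to compare the graph's output against (blackman_reference is an illustrative name, not part of the patch):

#include <cmath>
#include <cstdio>
#include <vector>

// Reference Blackman window with the coefficients used in blackmanwindow.cpp:
// a_0 = 0.42, a_1 = -0.50 (applied to the 2*pi term), a_2 = 0.08 (applied to the 4*pi term).
std::vector<float> blackman_reference(int size, bool periodic) {
    constexpr float kPi = 3.14159265358979323846f;
    const float den = periodic ? static_cast<float>(size)       // N = size
                               : static_cast<float>(size - 1);  // N = size - 1 (size > 1 assumed)
    std::vector<float> w(size);
    for (int n = 0; n < size; ++n) {
        const float x = static_cast<float>(n);
        w[n] = 0.42f - 0.50f * std::cos(2.0f * kPi * x / den)
                     + 0.08f * std::cos(4.0f * kPi * x / den);
    }
    return w;
}

int main() {
    for (float v : blackman_reference(10, /*periodic=*/true))
        std::printf("%f ", v);
    std::printf("\n");
    return 0;
}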
index 138c60fccc85d8..34f1927272193b 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -17,44 +17,53 @@ namespace op { namespace set_1 { OutputVector blackmanwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = + common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); - // Weights as described in ONNX BlackManWindow docs + // Weights as described in ONNX BlackmanWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.42f}); - const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{-0.50f}); + const auto a_1 = + std::make_shared(output_datatype, ov::Shape(), std::vector{-0.50f}); const auto a_2 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.08f}); - const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); + const auto start = + std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); + const auto size_cast = std::make_shared(size, output_datatype); const auto factor_1 = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(output_datatype, ov::Shape(), std::vector{2})), + std::make_shared( + std::make_shared(output_datatype, ov::Shape(), std::vector{2}), + output_datatype)), periodic - ? size + ? size_cast : std::make_shared( - size, + size_cast, std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); const auto factor_2 = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(output_datatype, ov::Shape(), std::vector{4})), + std::make_shared( + std::make_shared(output_datatype, ov::Shape(), std::vector{4}), + output_datatype)), periodic - ? size + ? 
size_cast : std::make_shared( - size, + size_cast, std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + const auto cos_1 = std::make_shared(factor_1); const auto cos_2 = std::make_shared(factor_2); const auto scaled_cos_1 = std::make_shared(cos_1, a_1); diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 4fdbe67f6acdc2..ea963f5133c168 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -17,13 +17,14 @@ namespace op { namespace set_1 { OutputVector hammingwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = + common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); - // Weights as described in ONNX BlackManWindow docs + // Weights as described in ONNX HammingWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow const auto a_0 = std::make_shared( std::make_shared(output_datatype, ov::Shape(), std::vector{25.0f}), @@ -32,21 +33,27 @@ OutputVector hammingwindow(const Node& node) { std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}), a_0); - const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); + const auto start = + std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); + const auto size_cast = std::make_shared(size, output_datatype); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(output_datatype, ov::Shape(), std::vector{2})), + std::make_shared( + std::make_shared(output_datatype, ov::Shape(), std::vector{2}), + output_datatype)), periodic - ? size + ? 
size_cast : std::make_shared( - size, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + size_cast, + std::make_shared( + std::make_shared(output_datatype, ov::Shape(), std::vector{1}), + output_datatype)))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index e7f6b65a2109ae..dee91b93c62793 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -17,32 +17,39 @@ namespace op { namespace set_1 { OutputVector hannwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); - const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); + const auto output_datatype = + common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1); const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); - // Weights as described in ONNX BlackManWindow docs + // Weights as described in ONNX HannWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5f}); - const auto a_1 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5f}); + const auto a_1 = a_0; - const auto start = std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); + const auto start = + std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); + const auto size_cast = std::make_shared(size, output_datatype); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared(output_datatype, ov::Shape(), std::vector{2})), + std::make_shared( + std::make_shared(output_datatype, ov::Shape(), std::vector{2}), + output_datatype)), periodic - ? size + ? 
size_cast : std::make_shared( - size, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + size_cast, + std::make_shared( + std::make_shared(output_datatype, ov::Shape(), std::vector{1}), + output_datatype)))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); From 898d6916d3a0bb212029fff1831b75403f3a26b5 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Wed, 11 Oct 2023 09:42:40 +0530 Subject: [PATCH 16/21] fix, replace cast with ov::convert --- src/frontends/onnx/frontend/src/op/blackmanwindow.cpp | 6 +++--- src/frontends/onnx/frontend/src/op/hammingwindow.cpp | 6 +++--- src/frontends/onnx/frontend/src/op/hannwindow.cpp | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 34f1927272193b..3511e4f8e22190 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -36,13 +36,13 @@ OutputVector blackmanwindow(const Node& node) { const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); - const auto size_cast = std::make_shared(size, output_datatype); + const auto size_cast = std::make_shared(size, output_datatype); const auto factor_1 = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared( + std::make_shared( std::make_shared(output_datatype, ov::Shape(), std::vector{2}), output_datatype)), periodic @@ -55,7 +55,7 @@ OutputVector blackmanwindow(const Node& node) { std::make_shared( std::make_shared( pi, - std::make_shared( + std::make_shared( std::make_shared(output_datatype, ov::Shape(), std::vector{4}), output_datatype)), periodic diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index ea963f5133c168..597730552d6d70 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -38,20 +38,20 @@ OutputVector hammingwindow(const Node& node) { const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); - const auto size_cast = std::make_shared(size, output_datatype); + const auto size_cast = std::make_shared(size, output_datatype); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared( + std::make_shared( std::make_shared(output_datatype, ov::Shape(), std::vector{2}), output_datatype)), periodic ? 
size_cast : std::make_shared( size_cast, - std::make_shared( + std::make_shared( std::make_shared(output_datatype, ov::Shape(), std::vector{1}), output_datatype)))); diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index dee91b93c62793..30d0148380cb9d 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -34,20 +34,20 @@ OutputVector hannwindow(const Node& node) { const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); const auto range = std::make_shared(start, size, step, output_datatype); const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); - const auto size_cast = std::make_shared(size, output_datatype); + const auto size_cast = std::make_shared(size, output_datatype); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared( + std::make_shared( std::make_shared(output_datatype, ov::Shape(), std::vector{2}), output_datatype)), periodic ? size_cast : std::make_shared( size_cast, - std::make_shared( + std::make_shared( std::make_shared(output_datatype, ov::Shape(), std::vector{1}), output_datatype)))); From 081cc1c278dc939da4511962cdb16a07b19496ad Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Wed, 11 Oct 2023 18:35:44 +0530 Subject: [PATCH 17/21] fix, use element::f32 --- .../onnx/frontend/src/op/blackmanwindow.cpp | 51 ++++++++++--------- .../onnx/frontend/src/op/hammingwindow.cpp | 36 +++++++------ .../onnx/frontend/src/op/hannwindow.cpp | 32 ++++++------ 3 files changed, 58 insertions(+), 61 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 3511e4f8e22190..526a6da9d56f9e 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -26,43 +26,43 @@ OutputVector blackmanwindow(const Node& node) { // Weights as described in ONNX BlackmanWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow - const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.42f}); + const auto a_0 = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.42f}); const auto a_1 = - std::make_shared(output_datatype, ov::Shape(), std::vector{-0.50f}); - const auto a_2 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.08f}); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{-0.50f}); + const auto a_2 = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.08f}); const auto start = - std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); - const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); - const auto range = std::make_shared(start, size, step, output_datatype); - const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); - const auto size_cast = std::make_shared(size, output_datatype); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); + const auto step = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); + const auto range = std::make_shared(start, size, step, ov::element::f32); + const auto pi = default_opset::Constant::create(ov::element::f32, ov::Shape(), {static_cast(M_PI)}); const auto factor_1 = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared( 
- std::make_shared(output_datatype, ov::Shape(), std::vector{2}), - output_datatype)), - periodic - ? size_cast - : std::make_shared( - size_cast, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f})), + periodic ? size + : std::make_shared( + size, + std::make_shared(ov::element::f32, + ov::Shape(), + std::vector{1.0f})))); const auto factor_2 = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{4}), - output_datatype)), - periodic - ? size_cast - : std::make_shared( - size_cast, - std::make_shared(output_datatype, ov::Shape(), std::vector{1})))); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{4.0f})), + periodic ? size + : std::make_shared( + size, + std::make_shared(ov::element::f32, + ov::Shape(), + std::vector{1.0f})))); const auto cos_1 = std::make_shared(factor_1); const auto cos_2 = std::make_shared(factor_2); @@ -70,8 +70,9 @@ OutputVector blackmanwindow(const Node& node) { const auto scaled_cos_2 = std::make_shared(cos_2, a_2); const auto y_values = std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); + const auto final_y_values = std::make_shared(y_values, output_datatype); - return {y_values}; + return {final_y_values}; } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 597730552d6d70..480986e01dceb4 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -27,39 +27,37 @@ OutputVector hammingwindow(const Node& node) { // Weights as described in ONNX HammingWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow const auto a_0 = std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{25.0f}), - std::make_shared(output_datatype, ov::Shape(), std::vector{46.0f})); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{25.0f}), + std::make_shared(ov::element::f32, ov::Shape(), std::vector{46.0f})); const auto a_1 = std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}), + std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}), a_0); const auto start = - std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); - const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); - const auto range = std::make_shared(start, size, step, output_datatype); - const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); - const auto size_cast = std::make_shared(size, output_datatype); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); + const auto step = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); + const auto range = std::make_shared(start, size, step, ov::element::f32); + const auto pi = default_opset::Constant::create(ov::element::f32, ov::Shape(), {static_cast(M_PI)}); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{2}), - output_datatype)), - periodic - ? 
size_cast - : std::make_shared( - size_cast, - std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{1}), - output_datatype)))); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f})), + periodic ? size + : std::make_shared( + size, + std::make_shared(ov::element::f32, + ov::Shape(), + std::vector{1.0f})))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); const auto y_values = std::make_shared(a_0, scaled_cos); + const auto final_y_values = std::make_shared(y_values, output_datatype); - return {y_values}; + return {final_y_values}; } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index 30d0148380cb9d..ff88417a75b1e7 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -26,36 +26,34 @@ OutputVector hannwindow(const Node& node) { // Weights as described in ONNX HannWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow - const auto a_0 = std::make_shared(output_datatype, ov::Shape(), std::vector{0.5f}); + const auto a_0 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.5f}); const auto a_1 = a_0; const auto start = - std::make_shared(output_datatype, ov::Shape(), std::vector{0.0f}); - const auto step = std::make_shared(output_datatype, ov::Shape(), std::vector{1.0f}); - const auto range = std::make_shared(start, size, step, output_datatype); - const auto pi = default_opset::Constant::create(output_datatype, ov::Shape(), {static_cast(M_PI)}); - const auto size_cast = std::make_shared(size, output_datatype); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); + const auto step = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); + const auto range = std::make_shared(start, size, step, ov::element::f32); + const auto pi = default_opset::Constant::create(ov::element::f32, ov::Shape(), {static_cast(M_PI)}); const auto factor = std::make_shared( range, std::make_shared( std::make_shared( pi, - std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{2}), - output_datatype)), - periodic - ? size_cast - : std::make_shared( - size_cast, - std::make_shared( - std::make_shared(output_datatype, ov::Shape(), std::vector{1}), - output_datatype)))); + std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f})), + periodic ? 
size + : std::make_shared( + size, + std::make_shared(ov::element::f32, + ov::Shape(), + std::vector{1.0f})))); const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); const auto y_values = std::make_shared(a_0, scaled_cos); + const auto final_y_values = std::make_shared(y_values, output_datatype); - return {y_values}; + return {final_y_values}; } } // namespace set_1 } // namespace op From 342ceb2032fdb28900143baf4c2552c54c814f71 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Wed, 18 Oct 2023 22:18:36 +0530 Subject: [PATCH 18/21] major fixes --- .../onnx/frontend/src/op/blackmanwindow.cpp | 65 +++++---- .../onnx/frontend/src/op/hammingwindow.cpp | 44 +++--- .../onnx/frontend/src/op/hannwindow.cpp | 44 +++--- .../models/blackmanwindow_periodic.prototxt | 9 +- .../models/blackmanwindow_symmetric.prototxt | 9 +- .../models/hammingwindow_periodic.prototxt | 9 +- .../models/hammingwindow_symmetric.prototxt | 9 +- .../tests/models/hannwindow_periodic.prototxt | 9 +- .../models/hannwindow_symmetric.prototxt | 9 +- src/frontends/onnx/tests/onnx_import.in.cpp | 128 ++++++++++++------ 10 files changed, 193 insertions(+), 142 deletions(-) diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index 526a6da9d56f9e..8ebca88b32f4cf 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -19,13 +19,14 @@ OutputVector blackmanwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); - const bool periodic = node.get_attribute_value("periodic", 1); + const bool periodic = node.get_attribute_value("periodic", 1) == 1; const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); // Weights as described in ONNX BlackmanWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow + const auto float_size = std::make_shared(size, ov::element::f32); const auto a_0 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.42f}); const auto a_1 = @@ -35,34 +36,35 @@ OutputVector blackmanwindow(const Node& node) { const auto start = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); - const auto step = + const auto one_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); - const auto range = std::make_shared(start, size, step, ov::element::f32); - const auto pi = default_opset::Constant::create(ov::element::f32, ov::Shape(), {static_cast(M_PI)}); - const auto factor_1 = std::make_shared( - range, - std::make_shared( - std::make_shared( - pi, - std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f})), - periodic ? size - : std::make_shared( - size, - std::make_shared(ov::element::f32, - ov::Shape(), - std::vector{1.0f})))); - const auto factor_2 = std::make_shared( - range, - std::make_shared( - std::make_shared( - pi, - std::make_shared(ov::element::f32, ov::Shape(), std::vector{4.0f})), - periodic ? 
size - : std::make_shared( - size, - std::make_shared(ov::element::f32, - ov::Shape(), - std::vector{1.0f})))); + const auto two_const = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); + const auto four_const = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{4.0f}); + const auto range = std::make_shared(start, size, one_const, ov::element::f32); + const auto pi = + default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); + std::shared_ptr factor_1, factor_2; + if (periodic) { + factor_1 = std::make_shared( + range, + std::make_shared(std::make_shared(pi, two_const), + float_size)); + factor_2 = std::make_shared( + range, + std::make_shared(std::make_shared(pi, four_const), + float_size)); + } else { + factor_1 = std::make_shared( + range, + std::make_shared(std::make_shared(pi, two_const), + std::make_shared(float_size, one_const))); + factor_2 = std::make_shared( + range, + std::make_shared(std::make_shared(pi, four_const), + std::make_shared(float_size, one_const))); + } const auto cos_1 = std::make_shared(factor_1); const auto cos_2 = std::make_shared(factor_2); @@ -70,9 +72,12 @@ OutputVector blackmanwindow(const Node& node) { const auto scaled_cos_2 = std::make_shared(cos_2, a_2); const auto y_values = std::make_shared(std::make_shared(a_0, scaled_cos_1), scaled_cos_2); - const auto final_y_values = std::make_shared(y_values, output_datatype); - return {final_y_values}; + if (output_datatype == element::f32) { + return {y_values}; + } else { + return {std::make_shared(y_values, output_datatype)}; + } } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 480986e01dceb4..25d557f7de6bdc 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -19,13 +19,14 @@ OutputVector hammingwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); - const bool periodic = node.get_attribute_value("periodic", 1); + const bool periodic = node.get_attribute_value("periodic", 1) == 1; const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); // Weights as described in ONNX HammingWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow + const auto float_size = std::make_shared(size, ov::element::f32); const auto a_0 = std::make_shared( std::make_shared(ov::element::f32, ov::Shape(), std::vector{25.0f}), std::make_shared(ov::element::f32, ov::Shape(), std::vector{46.0f})); @@ -35,29 +36,34 @@ OutputVector hammingwindow(const Node& node) { const auto start = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); - const auto step = + const auto one_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); - const auto range = std::make_shared(start, size, step, ov::element::f32); - const auto pi = default_opset::Constant::create(ov::element::f32, ov::Shape(), {static_cast(M_PI)}); - const auto factor = std::make_shared( - range, - std::make_shared( - std::make_shared( - pi, - std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f})), - periodic ? 
size - : std::make_shared( - size, - std::make_shared(ov::element::f32, - ov::Shape(), - std::vector{1.0f})))); + const auto two_const = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); + const auto range = std::make_shared(start, size, one_const, ov::element::f32); + const auto pi = + default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); + std::shared_ptr factor; + if (periodic) { + factor = std::make_shared( + range, + std::make_shared(std::make_shared(pi, two_const), + float_size)); + } else { + factor = std::make_shared( + range, + std::make_shared(std::make_shared(pi, two_const), + std::make_shared(float_size, one_const))); + } const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); const auto y_values = std::make_shared(a_0, scaled_cos); - const auto final_y_values = std::make_shared(y_values, output_datatype); - - return {final_y_values}; + if (output_datatype == element::f32) { + return {y_values}; + } else { + return {std::make_shared(y_values, output_datatype)}; + } } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index ff88417a75b1e7..b0e28afd2e5570 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -19,41 +19,47 @@ OutputVector hannwindow(const Node& node) { const auto size = node.get_ng_inputs().at(0); const auto output_datatype = common::get_ngraph_element_type(node.get_attribute_value("output_datatype", 1)); - const bool periodic = node.get_attribute_value("periodic", 1); + const bool periodic = node.get_attribute_value("periodic", 1) == 1; const ov::PartialShape shape = size.get_partial_shape(); const std::vector axis_lengths = shape.to_shape(); // Weights as described in ONNX HannWindow docs // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow + const auto float_size = std::make_shared(size, ov::element::f32); const auto a_0 = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.5f}); const auto a_1 = a_0; const auto start = std::make_shared(ov::element::f32, ov::Shape(), std::vector{0.0f}); - const auto step = + const auto one_const = std::make_shared(ov::element::f32, ov::Shape(), std::vector{1.0f}); - const auto range = std::make_shared(start, size, step, ov::element::f32); - const auto pi = default_opset::Constant::create(ov::element::f32, ov::Shape(), {static_cast(M_PI)}); - const auto factor = std::make_shared( - range, - std::make_shared( - std::make_shared( - pi, - std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f})), - periodic ? 
size - : std::make_shared( - size, - std::make_shared(ov::element::f32, - ov::Shape(), - std::vector{1.0f})))); + const auto two_const = + std::make_shared(ov::element::f32, ov::Shape(), std::vector{2.0f}); + const auto range = std::make_shared(start, size, one_const, ov::element::f32); + const auto pi = + default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector{static_cast(M_PI)}); + std::shared_ptr factor; + if (periodic) { + factor = std::make_shared( + range, + std::make_shared(std::make_shared(pi, two_const), + float_size)); + } else { + factor = std::make_shared( + range, + std::make_shared(std::make_shared(pi, two_const), + std::make_shared(float_size, one_const))); + } const auto cos = std::make_shared(factor); const auto scaled_cos = std::make_shared(cos, a_1); const auto y_values = std::make_shared(a_0, scaled_cos); - const auto final_y_values = std::make_shared(y_values, output_datatype); - - return {final_y_values}; + if (output_datatype == element::f32) { + return {y_values}; + } else { + return {std::make_shared(y_values, output_datatype)}; + } } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt index 522679385a734e..d44b234fc914a8 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt @@ -17,18 +17,15 @@ graph { } } name: "test_blackmanwindow_periodic" - initializer { - data_type: 7 # INT64 - int64_data: 10 - name: "size" - } input { name: "size" type { tensor_type { elem_type: 7 # INT64 shape { - + dim { + dim_value: 1 + } } } } diff --git a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt index a40aff00745b02..78fbb370ae9181 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt @@ -17,18 +17,15 @@ graph { } } name: "test_blackmanwindow_symmetric" - initializer { - data_type: 7 # INT64 - int64_data: 10 - name: "size" - } input { name: "size" type { tensor_type { elem_type: 7 # INT64 shape { - + dim { + dim_value: 1 + } } } } diff --git a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt index fffa39a245aab1..fc7241eed2d84f 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt @@ -17,18 +17,15 @@ graph { } } name: "test_hammingwindow_periodic" - initializer { - data_type: 7 # INT64 - int64_data: 10 - name: "size" - } input { name: "size" type { tensor_type { elem_type: 7 # INT64 shape { - + dim { + dim_value: 1 + } } } } diff --git a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt index b4819935370892..3da33ab2a1ef1b 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt @@ -17,18 +17,15 @@ graph { } } name: "test_hammingwindow_symmetric" - initializer { - data_type: 7 # INT64 - int64_data: 10 - name: "size" - } input { name: "size" type { tensor_type { elem_type: 7 # INT64 shape { - + dim { + dim_value: 1 + } } } } diff --git a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt 
b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt index b74249d8005813..fb6899d4df95f0 100644 --- a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt @@ -17,18 +17,15 @@ graph { } } name: "test_hannwindow_periodic" - initializer { - data_type: 7 # INT64 - int64_data: 10 - name: "size" - } input { name: "size" type { tensor_type { elem_type: 7 # INT64 shape { - + dim { + dim_value: 1 + } } } } diff --git a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt index b86ecb1520e48f..51c6db7a1903cd 100644 --- a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt @@ -17,18 +17,15 @@ graph { } } name: "test_hannwindow_symmetric" - initializer { - data_type: 7 # INT64 - int64_data: 10 - name: "size" - } input { name: "size" type { tensor_type { elem_type: 7 # INT64 shape { - + dim { + dim_value: 1 + } } } } diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index c674ec34f47556..1f354bb1b2a5e1 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6718,87 +6718,139 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_unique_3d_with_duplicates_and_axis_2) } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) { - auto function = - onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), - SERIALIZED_ZOO, - "onnx/blackmanwindow_periodic.onnx")); + auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/blackmanwindow_periodic.onnx")); auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, {0.0000f, 0.0509f, 0.2580f, 0.6300f, 0.9511f, 0.9511f, 0.6300f, 0.2580f, 0.0509f, 0.0000f}); + test_case.add_expected_output(Shape{10}, + {-0.000000014901161f, + 0.040212844f, + 0.20077012f, + 0.50978714f, + 0.8492299f, + 0.99999994f, + 0.84922975f, + 0.5097869f, + 0.20077008f, + 0.040212862f}); - test_case.run(); + test_case.run_with_tolerance_as_fp(); } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) { - auto function = - onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), - SERIALIZED_ZOO, - "onnx/blackmanwindow_symmetric.onnx")); + auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/blackmanwindow_symmetric.onnx")); auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, {0.0000f, 0.0509f, 0.2580f, 0.6300f, 0.9511f, 0.9511f, 0.6300f, 0.2580f, 0.0509f, 0.0000f}); + test_case.add_expected_output(Shape{10}, + {-0.00000001f, + 0.05086961f, + 0.25800052f, + 0.63f, + 0.9511299f, + 0.9511298f, + 0.62999994f, + 0.25800028f, + 0.05086958f, + -0.00000001f}); - test_case.run(); + test_case.run_with_tolerance_as_fp(); } - OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { - auto function = - onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), - SERIALIZED_ZOO, - "onnx/hammingwindow_periodic.onnx")); + auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + 
SERIALIZED_ZOO, + "onnx/hammingwindow_periodic.onnx")); auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, {0.0800f, 0.2533f, 0.6424f, 1.0000f, 0.6424f, 0.2533f, 0.0800f, 0.0000f, 0.0800f, 0.2533f}); + test_case.add_expected_output(Shape{10}, + {0.0869565f, + 0.17413944f, + 0.40238917f, + 0.68452704f, + 0.9127922f, + 1.0000000f, + 0.91284204f, + 0.6846076f, + 0.4024696f, + 0.17418906f}); - test_case.run(); + test_case.run_with_tolerance_as_fp(); } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) { - auto function = - onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), - SERIALIZED_ZOO, - "onnx/hammingwindow_symmetric.onnx")); + auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/hammingwindow_symmetric.onnx")); auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, {0.0800f, 0.2533f, 0.6424f, 1.0000f, 0.6424f, 0.2533f, 0.0800f, 0.0000f, 0.0800f, 0.2533f}); + test_case.add_expected_output(Shape{10}, + {0.08695650f, + 0.19375625f, + 0.46418557f, + 0.77171463f, + 0.97245550f, + 0.97248441f, + 0.77178812f, + 0.46426883f, + 0.19381064f, + 0.08695650f}); - test_case.run(); + test_case.run_with_tolerance_as_fp(); } - OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) { - auto function = - onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), - SERIALIZED_ZOO, - "onnx/hannwindow_periodic.onnx")); + auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/hannwindow_periodic.onnx")); auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, {0.0000f, 0.1908f, 0.5f, 0.8092f, 1.0000f, 0.8092f, 0.5f, 0.1908f, 0.0000f, 0.0000f}); + test_case.add_expected_output(Shape{10}, + {0.00000000f, + 0.09548607f, + 0.34547389f, + 0.65448201f, + 0.90448672f, + 1.00000000f, + 0.90454125f, + 0.65457022f, + 0.34556198f, + 0.09554043f}); - test_case.run(); + test_case.run_with_tolerance_as_fp(); } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) { - auto function = - onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), - SERIALIZED_ZOO, - "onnx/hannwindow_symmetric.onnx")); + auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(), + SERIALIZED_ZOO, + "onnx/hannwindow_symmetric.onnx")); auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, {0.0000f, 0.1908f, 0.5f, 0.8092f, 1.0000f, 0.8092f, 0.5f, 0.1908f, 0.0000f, 0.0000f}); + test_case.add_expected_output(Shape{10}, + {0.00000000f, + 0.11697116f, + 0.41315565f, + 0.74997318f, + 0.96983224f, + 0.96986389f, + 0.75005364f, + 0.41324684f, + 0.11703071f, + 0.00000000f}); - test_case.run(); + test_case.run_with_tolerance_as_fp(); } From 986629bcf695f6351a284fc736ecbd39adb248ce Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Thu, 19 Oct 2023 21:09:06 +0530 Subject: [PATCH 19/21] fixes --- .../models/blackmanwindow_periodic.prototxt | 3 -- .../models/blackmanwindow_symmetric.prototxt | 5 +- .../models/hammingwindow_periodic.prototxt | 3 -- .../models/hammingwindow_symmetric.prototxt | 3 -- 
.../tests/models/hannwindow_periodic.prototxt | 3 -- .../models/hannwindow_symmetric.prototxt | 3 -- src/frontends/onnx/tests/onnx_import.in.cpp | 46 +++++++++++++++---- .../onnx/tests/tests_python/test_backend.py | 6 --- 8 files changed, 39 insertions(+), 33 deletions(-) diff --git a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt index d44b234fc914a8..f8759ce921028a 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt @@ -23,9 +23,6 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } } } } diff --git a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt index 78fbb370ae9181..1d60e783ead99a 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt @@ -11,7 +11,7 @@ graph { type: INT } attribute { - name: "symmetric" + name: "periodic" i: 0 # Set to 1 for periodic, 0 for non-periodic type: INT } @@ -23,9 +23,6 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } } } } diff --git a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt index fc7241eed2d84f..2bf75ed29fe7f6 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt @@ -23,9 +23,6 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } } } } diff --git a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt index 3da33ab2a1ef1b..1c9a9019829383 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt @@ -23,9 +23,6 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } } } } diff --git a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt index fb6899d4df95f0..2895bf5ad9b4d9 100644 --- a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt @@ -23,9 +23,6 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } } } } diff --git a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt index 51c6db7a1903cd..ec2bc2b8e42bef 100644 --- a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt @@ -23,9 +23,6 @@ graph { tensor_type { elem_type: 7 # INT64 shape { - dim { - dim_value: 1 - } } } } diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 1f354bb1b2a5e1..be5de5015de7f1 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6725,7 +6725,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) { auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, + test_case.add_expected_output(Shape{10}, {-0.000000014901161f, 0.040212844f, 0.20077012f, @@ 
-6737,7 +6737,12 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) { 0.20077008f, 0.040212862f}); - test_case.run_with_tolerance_as_fp(); + // GPU has an accuracy drop, need to use different tolerance + if("${BACKEND_NAME}" != "IE_GPU") { + test_case.run_with_tolerance_as_fp(); + } else { + test_case.run_with_tolerance_as_fp(0.01f); + } } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) { @@ -6748,7 +6753,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) { auto test_case = ov::test::TestCase(function, s_device); test_case.add_input({10}); - test_case.add_expected_output(Shape{10}, + test_case.add_expected_output(Shape{10}, {-0.00000001f, 0.05086961f, 0.25800052f, @@ -6760,7 +6765,12 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) { 0.05086958f, -0.00000001f}); - test_case.run_with_tolerance_as_fp(); + // GPU has an accuracy drop, need to use different tolerance + if("${BACKEND_NAME}" != "IE_GPU") { + test_case.run_with_tolerance_as_fp(); + } else { + test_case.run_with_tolerance_as_fp(0.01f); + } } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { @@ -6783,7 +6793,12 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { 0.4024696f, 0.17418906f}); - test_case.run_with_tolerance_as_fp(); + // GPU has an accuracy drop, need to use different tolerance + if("${BACKEND_NAME}" != "IE_GPU") { + test_case.run_with_tolerance_as_fp(); + } else { + test_case.run_with_tolerance_as_fp(0.01f); + } } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) { @@ -6806,7 +6821,12 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) { 0.19381064f, 0.08695650f}); - test_case.run_with_tolerance_as_fp(); + // GPU has an accuracy drop, need to use different tolerance + if("${BACKEND_NAME}" != "IE_GPU") { + test_case.run_with_tolerance_as_fp(); + } else { + test_case.run_with_tolerance_as_fp(0.01f); + } } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) { @@ -6829,7 +6849,12 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) { 0.34556198f, 0.09554043f}); - test_case.run_with_tolerance_as_fp(); + // GPU has an accuracy drop, need to use different tolerance + if("${BACKEND_NAME}" != "IE_GPU") { + test_case.run_with_tolerance_as_fp(); + } else { + test_case.run_with_tolerance_as_fp(0.01f); + } } OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) { @@ -6852,5 +6877,10 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) { 0.11703071f, 0.00000000f}); - test_case.run_with_tolerance_as_fp(); + // GPU has an accuracy drop, need to use different tolerance + if("${BACKEND_NAME}" != "IE_GPU") { + test_case.run_with_tolerance_as_fp(); + } else { + test_case.run_with_tolerance_as_fp(0.01f); + } } diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index d1ef686bdd4124..21db0db884d792 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -376,12 +376,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ( xfail_issue_90649, - "OnnxBackendNodeModelTest.test_blackmanwindow_cpu", - "OnnxBackendNodeModelTest.test_blackmanwindow_symmetric_cpu", - "OnnxBackendNodeModelTest.test_hammingwindow_cpu", - "OnnxBackendNodeModelTest.test_hammingwindow_symmetric_cpu", - "OnnxBackendNodeModelTest.test_hannwindow_cpu", - "OnnxBackendNodeModelTest.test_hannwindow_symmetric_cpu", 
"OnnxBackendNodeModelTest.test_melweightmatrix_cpu", "OnnxBackendNodeModelTest.test_sequence_map_add_1_sequence_1_tensor_cpu", "OnnxBackendNodeModelTest.test_sequence_map_add_2_sequences_cpu", From b63907e8c197c193e199ff9e9d6eb21839156dd5 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Fri, 20 Oct 2023 11:13:55 +0530 Subject: [PATCH 20/21] Update onnx_import.in.cpp --- src/frontends/onnx/tests/onnx_import.in.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index be5de5015de7f1..76653ad5d0845c 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6738,7 +6738,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) { 0.040212862f}); // GPU has an accuracy drop, need to use different tolerance - if("${BACKEND_NAME}" != "IE_GPU") { + if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6766,7 +6766,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) { -0.00000001f}); // GPU has an accuracy drop, need to use different tolerance - if("${BACKEND_NAME}" != "IE_GPU") { + if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6794,7 +6794,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { 0.17418906f}); // GPU has an accuracy drop, need to use different tolerance - if("${BACKEND_NAME}" != "IE_GPU") { + if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6822,7 +6822,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) { 0.08695650f}); // GPU has an accuracy drop, need to use different tolerance - if("${BACKEND_NAME}" != "IE_GPU") { + if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6850,7 +6850,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) { 0.09554043f}); // GPU has an accuracy drop, need to use different tolerance - if("${BACKEND_NAME}" != "IE_GPU") { + if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6878,7 +6878,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) { 0.00000000f}); // GPU has an accuracy drop, need to use different tolerance - if("${BACKEND_NAME}" != "IE_GPU") { + if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); From 898c94acf5381f42f594142a1884eda7133bd538 Mon Sep 17 00:00:00 2001 From: siddhant-0707 Date: Fri, 20 Oct 2023 17:34:12 +0530 Subject: [PATCH 21/21] Update onnx_import.in.cpp --- src/frontends/onnx/tests/onnx_import.in.cpp | 120 ++++++++++---------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 76653ad5d0845c..361805e45cf0d4 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -6726,19 +6726,19 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) { test_case.add_input({10}); 
test_case.add_expected_output(Shape{10}, - {-0.000000014901161f, - 0.040212844f, - 0.20077012f, - 0.50978714f, - 0.8492299f, - 0.99999994f, - 0.84922975f, - 0.5097869f, - 0.20077008f, - 0.040212862f}); + {-0.000000014901161f, + 0.040212844f, + 0.20077012f, + 0.50978714f, + 0.8492299f, + 0.99999994f, + 0.84922975f, + 0.5097869f, + 0.20077008f, + 0.040212862f}); // GPU has an accuracy drop, need to use different tolerance - if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { + if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6754,19 +6754,19 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) { test_case.add_input({10}); test_case.add_expected_output(Shape{10}, - {-0.00000001f, - 0.05086961f, - 0.25800052f, - 0.63f, - 0.9511299f, - 0.9511298f, - 0.62999994f, - 0.25800028f, - 0.05086958f, - -0.00000001f}); + {-0.00000001f, + 0.05086961f, + 0.25800052f, + 0.63000000f, + 0.95112991f, + 0.95112979f, + 0.62999994f, + 0.25800028f, + 0.05086958f, + -0.00000001f}); // GPU has an accuracy drop, need to use different tolerance - if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { + if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6782,19 +6782,19 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) { test_case.add_input({10}); test_case.add_expected_output(Shape{10}, - {0.0869565f, - 0.17413944f, - 0.40238917f, - 0.68452704f, - 0.9127922f, - 1.0000000f, - 0.91284204f, - 0.6846076f, - 0.4024696f, - 0.17418906f}); + {0.08695650f, + 0.17414439f, + 0.40240526f, + 0.68455124f, + 0.91281211f, + 1.00000000f, + 0.91281211f, + 0.68455112f, + 0.40240520f, + 0.17414442f}); // GPU has an accuracy drop, need to use different tolerance - if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { + if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6811,18 +6811,18 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) { test_case.add_input({10}); test_case.add_expected_output(Shape{10}, {0.08695650f, - 0.19375625f, - 0.46418557f, - 0.77171463f, - 0.97245550f, - 0.97248441f, - 0.77178812f, - 0.46426883f, - 0.19381064f, + 0.19376230f, + 0.46420413f, + 0.77173913f, + 0.97246838f, + 0.97246838f, + 0.77173907f, + 0.46420389f, + 0.19376221f, 0.08695650f}); // GPU has an accuracy drop, need to use different tolerance - if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { + if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ -6839,18 +6839,18 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) { test_case.add_input({10}); test_case.add_expected_output(Shape{10}, {0.00000000f, - 0.09548607f, - 0.34547389f, - 0.65448201f, - 0.90448672f, + 0.09549150f, + 0.34549153f, + 0.65450853f, + 0.90450847f, 1.00000000f, - 0.90454125f, - 0.65457022f, - 0.34556198f, - 0.09554043f}); + 0.90450847f, + 0.65450835f, + 0.34549144f, + 0.09549153f}); // GPU has an accuracy drop, need to use different tolerance - if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { + if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f); @@ 
-6867,18 +6867,18 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) { test_case.add_input({10}); test_case.add_expected_output(Shape{10}, {0.00000000f, - 0.11697116f, - 0.41315565f, - 0.74997318f, - 0.96983224f, - 0.96986389f, - 0.75005364f, - 0.41324684f, - 0.11703071f, + 0.11697778f, + 0.41317594f, + 0.75000000f, + 0.96984637f, + 0.96984625f, + 0.74999994f, + 0.41317570f, + 0.11697769f, 0.00000000f}); // GPU has an accuracy drop, need to use different tolerance - if(std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { + if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) { test_case.run_with_tolerance_as_fp(); } else { test_case.run_with_tolerance_as_fp(0.01f);
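
For reference, the expected outputs in the tests above follow the generalized cosine-window formula that the BlackmanWindow, HammingWindow, and HannWindow translations assemble from Range, Cos, Multiply, Subtract, and Add nodes: w[n] = a_0 - a_1 * cos(2*pi*n/N) + a_2 * cos(4*pi*n/N), with N = size when periodic and N = size - 1 when symmetric. The program below is not part of the patch; it is a minimal standalone sketch (window size and output formatting chosen here only to mirror the size-10 test cases) that reproduces the expected values up to float rounding.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace {
constexpr double kPi = 3.14159265358979323846;

// w[n] = a0 - a1 * cos(2*pi*n/N) + a2 * cos(4*pi*n/N), N = size (periodic) or size - 1 (symmetric)
std::vector<float> cosine_window(int64_t size, double a0, double a1, double a2, bool periodic) {
    const double N = periodic ? static_cast<double>(size) : static_cast<double>(size - 1);
    std::vector<float> w(static_cast<size_t>(size));
    for (int64_t n = 0; n < size; ++n) {
        const double x = 2.0 * kPi * static_cast<double>(n) / N;
        w[static_cast<size_t>(n)] = static_cast<float>(a0 - a1 * std::cos(x) + a2 * std::cos(2.0 * x));
    }
    return w;
}
}  // namespace

int main() {
    const int64_t size = 10;                // matches the size-10 input used by the tests above
    const double hamming_a0 = 25.0 / 46.0;  // the exact coefficient built by the HammingWindow translation
    struct Spec {
        const char* name;
        double a0, a1, a2;
    };
    const Spec specs[] = {
        {"blackman", 0.42, 0.50, 0.08},
        {"hamming", hamming_a0, 1.0 - hamming_a0, 0.0},
        {"hann", 0.50, 0.50, 0.0},
    };
    for (const auto& s : specs) {
        for (const bool periodic : {true, false}) {
            std::printf("%-8s %-9s:", s.name, periodic ? "periodic" : "symmetric");
            for (const float v : cosine_window(size, s.a0, s.a1, s.a2, periodic)) {
                std::printf(" %.8f", v);
            }
            std::printf("\n");
        }
    }
    return 0;
}

Compiling and running this sketch (for example with g++ -std=c++14 and executing the resulting binary) prints six rows of ten values that match the add_expected_output tables in the final revision of the tests, which is a quick way to cross-check the 0.01f GPU tolerance against the analytically exact window values.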