diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp
new file mode 100644
index 00000000000000..8ebca88b32f4cf
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp
@@ -0,0 +1,86 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+// _USE_MATH_DEFINES must be defined before the first math header is pulled in
+// (possibly transitively) so that M_PI is available on MSVC.
+#define _USE_MATH_DEFINES
+#include "op/blackmanwindow.hpp"
+
+#include <math.h>
+#include <memory>
+
+#include "default_opset.hpp"
+#include "utils/common.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+OutputVector blackmanwindow(const Node& node) {
+    const auto size = node.get_ng_inputs().at(0);
+    const auto output_datatype =
+        common::get_ngraph_element_type(node.get_attribute_value<int64_t>("output_datatype", 1));
+    const bool periodic = node.get_attribute_value<int64_t>("periodic", 1) == 1;
+
+    const ov::PartialShape shape = size.get_partial_shape();
+    const std::vector<size_t> axis_lengths = shape.to_shape();
+
+    // Weights as described in ONNX BlackmanWindow docs
+    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#blackmanwindow
+    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
+    const auto a_0 =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.42f});
+    const auto a_1 =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{-0.50f});
+    const auto a_2 =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.08f});
+
+    const auto start =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
+    const auto one_const =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
+    const auto two_const =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
+    const auto four_const =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{4.0f});
+    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
+    const auto pi =
+        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
+    std::shared_ptr<ov::Node> factor_1, factor_2;
+    if (periodic) {
+        factor_1 = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
+                                                    float_size));
+        factor_2 = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, four_const),
+                                                    float_size));
+    } else {
+        factor_1 = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
+                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
+        factor_2 = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, four_const),
+                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
+    }
+
+    const auto cos_1 = std::make_shared<default_opset::Cos>(factor_1);
+    const auto cos_2 = std::make_shared<default_opset::Cos>(factor_2);
+    const auto scaled_cos_1 = std::make_shared<default_opset::Multiply>(cos_1, a_1);
+    const auto scaled_cos_2 = std::make_shared<default_opset::Multiply>(cos_2, a_2);
+    const auto y_values =
+        std::make_shared<default_opset::Add>(std::make_shared<default_opset::Add>(a_0, scaled_cos_1), scaled_cos_2);
+
+    if (output_datatype == element::f32) {
+        return {y_values};
+    } else {
+        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
+    }
+}
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
\ No newline at end of file
diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp
new file mode 100644
index 00000000000000..ccff09c84817af
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "openvino/core/deprecated.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+
+#include "ngraph/node.hpp"
+#include "onnx_import/core/node.hpp"
+
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+
+OutputVector blackmanwindow(const Node& node);
+
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
\ No newline at end of file
diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp
new file mode 100644
index 00000000000000..25d557f7de6bdc
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp
@@ -0,0 +1,72 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+// _USE_MATH_DEFINES must precede any math header so that M_PI is defined on MSVC.
+#define _USE_MATH_DEFINES
+#include "op/hammingwindow.hpp"
+
+#include <math.h>
+#include <memory>
+
+#include "default_opset.hpp"
+#include "utils/common.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+OutputVector hammingwindow(const Node& node) {
+    const auto size = node.get_ng_inputs().at(0);
+    const auto output_datatype =
+        common::get_ngraph_element_type(node.get_attribute_value<int64_t>("output_datatype", 1));
+    const bool periodic = node.get_attribute_value<int64_t>("periodic", 1) == 1;
+
+    const ov::PartialShape shape = size.get_partial_shape();
+    const std::vector<size_t> axis_lengths = shape.to_shape();
+
+    // Weights as described in ONNX HammingWindow docs
+    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hammingwindow
+    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
+    const auto a_0 = std::make_shared<default_opset::Divide>(
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{25.0f}),
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{46.0f}));
+    const auto a_1 = std::make_shared<default_opset::Subtract>(
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f}),
+        a_0);
+
+    const auto start =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
+    const auto one_const =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
+    const auto two_const =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
+    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
+    const auto pi =
+        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
+    std::shared_ptr<ov::Node> factor;
+    if (periodic) {
+        factor = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
+                                                    float_size));
+    } else {
+        factor = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
+                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
+    }
+
+    const auto cos = std::make_shared<default_opset::Cos>(factor);
+    const auto scaled_cos = std::make_shared<default_opset::Multiply>(cos, a_1);
+    const auto y_values = std::make_shared<default_opset::Subtract>(a_0, scaled_cos);
+    if (output_datatype == element::f32) {
+        return {y_values};
+    } else {
+        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
+    }
+}
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
\ No newline at end of file
diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp
new file mode 100644
index 00000000000000..d088b4105abc3a
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "openvino/core/deprecated.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+
+#include "ngraph/node.hpp"
+#include "onnx_import/core/node.hpp"
+
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+
+OutputVector hammingwindow(const Node& node);
+
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
\ No newline at end of file
diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp
new file mode 100644
index 00000000000000..b0e28afd2e5570
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp
@@ -0,0 +1,68 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+// _USE_MATH_DEFINES must precede any math header so that M_PI is defined on MSVC.
+#define _USE_MATH_DEFINES
+#include "op/hannwindow.hpp"
+
+#include <math.h>
+#include <memory>
+
+#include "default_opset.hpp"
+#include "utils/common.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+OutputVector hannwindow(const Node& node) {
+    const auto size = node.get_ng_inputs().at(0);
+    const auto output_datatype =
+        common::get_ngraph_element_type(node.get_attribute_value<int64_t>("output_datatype", 1));
+    const bool periodic = node.get_attribute_value<int64_t>("periodic", 1) == 1;
+
+    const ov::PartialShape shape = size.get_partial_shape();
+    const std::vector<size_t> axis_lengths = shape.to_shape();
+
+    // Weights as described in ONNX HannWindow docs
+    // https://github.com/onnx/onnx/blob/main/docs/Operators.md#hannwindow
+    const auto float_size = std::make_shared<default_opset::Convert>(size, ov::element::f32);
+    const auto a_0 = std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.5f});
+    const auto a_1 = a_0;
+
+    const auto start =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{0.0f});
+    const auto one_const =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{1.0f});
+    const auto two_const =
+        std::make_shared<default_opset::Constant>(ov::element::f32, ov::Shape(), std::vector<float>{2.0f});
+    const auto range = std::make_shared<default_opset::Range>(start, size, one_const, ov::element::f32);
+    const auto pi =
+        default_opset::Constant::create(ov::element::f32, ov::Shape(), std::vector<float>{static_cast<float>(M_PI)});
+    std::shared_ptr<ov::Node> factor;
+    if (periodic) {
+        factor = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
+                                                    float_size));
+    } else {
+        factor = std::make_shared<default_opset::Multiply>(
+            range,
+            std::make_shared<default_opset::Divide>(std::make_shared<default_opset::Multiply>(pi, two_const),
+                                                    std::make_shared<default_opset::Subtract>(float_size, one_const)));
+    }
+
+    const auto cos = std::make_shared<default_opset::Cos>(factor);
+    const auto scaled_cos = std::make_shared<default_opset::Multiply>(cos, a_1);
+    const auto y_values = std::make_shared<default_opset::Subtract>(a_0, scaled_cos);
+    if (output_datatype == element::f32) {
+        return {y_values};
+    } else {
+        return {std::make_shared<default_opset::Convert>(y_values, output_datatype)};
+    }
+}
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
\ No newline at end of file
diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp
new file mode 100644
index 00000000000000..0c9e6993048ef3
--- /dev/null
+++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "openvino/core/deprecated.hpp"
+OPENVINO_SUPPRESS_DEPRECATED_START
+
+#include "ngraph/node.hpp"
+#include "onnx_import/core/node.hpp"
+
+namespace ngraph {
+namespace onnx_import {
+namespace op {
+namespace set_1 {
+
+OutputVector hannwindow(const Node& node);
+
+}  // namespace set_1
+}  // namespace op
+}  // namespace onnx_import
+}  // namespace ngraph
+OPENVINO_SUPPRESS_DEPRECATED_END
\ No newline at end of file
diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp
index e6707335afd0b8..c4d9a50c4ca637 100644
--- a/src/frontends/onnx/frontend/src/ops_bridge.cpp
+++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp
@@ -29,6 +29,7 @@
 #include "op/average_pool.hpp"
 #include "op/batch_norm.hpp"
 #include "op/bitshift.hpp"
+#include "op/blackmanwindow.hpp"
 #include "op/cast.hpp"
 #include "op/cast_like.hpp"
 #include "op/ceil.hpp"
@@ -75,6 +76,8 @@
 #include "op/greater.hpp"
 #include "op/grid_sample.hpp"
 #include "op/gru.hpp"
+#include "op/hammingwindow.hpp"
+#include "op/hannwindow.hpp"
 #include "op/hard_sigmoid.hpp"
 #include "op/hard_swish.hpp"
 #include "op/hardmax.hpp"
@@ -345,6 +348,7 @@ OperatorsBridge::OperatorsBridge() {
     REGISTER_OPERATOR("BatchNormalization", 1, batch_norm);
     REGISTER_OPERATOR("BatchNormalization", 7, batch_norm);
     REGISTER_OPERATOR("BitShift", 1, bitshift);
+    REGISTER_OPERATOR("BlackmanWindow", 1, blackmanwindow);
     REGISTER_OPERATOR("Cast", 1, cast);
     REGISTER_OPERATOR("CastLike", 1, cast_like);
     REGISTER_OPERATOR("Ceil", 1, ceil);
@@ -392,6 +396,8 @@ OperatorsBridge::OperatorsBridge() {
     REGISTER_OPERATOR("Greater", 1, greater);
     REGISTER_OPERATOR("GridSample", 1, grid_sample);
     REGISTER_OPERATOR("GRU", 1, gru);
+    REGISTER_OPERATOR("HannWindow", 1, hannwindow);
+    REGISTER_OPERATOR("HammingWindow", 1, hammingwindow);
     REGISTER_OPERATOR("Hardmax", 1, hardmax);
     REGISTER_OPERATOR("Hardmax", 13, hardmax);
     REGISTER_OPERATOR("HardSigmoid", 1, hard_sigmoid);
diff --git a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt
new file mode 100644
index 00000000000000..f8759ce921028a
--- /dev/null
+++ b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt
@@ -0,0 +1,46 @@
+ir_version: 7
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "size"
+    output: "y"
+    op_type: "BlackmanWindow"
+    attribute {
+      name: "output_datatype"
+      i: 1 # Use 1 for f32
+      type: INT
+    }
+    attribute {
+      name: "periodic"
+      i: 1 # Set to 1 for periodic, 0 for non-periodic
+      type: INT
+    }
+  }
+  name: "test_blackmanwindow_periodic"
+  input {
+    name: "size"
+    type {
+      tensor_type {
+        elem_type: 7 # INT64
+        shape {
+        }
+      }
+    }
+  }
+  output {
+    name: "y"
+    type {
+      tensor_type {
+        elem_type: 1 # FLOAT
+        shape {
+          dim {
+            dim_value: 10 # Expected output length for the size=10 test input
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 17
+}
diff --git a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt
new file mode 100644
index 00000000000000..1d60e783ead99a
--- /dev/null
+++ b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt
@@ -0,0 +1,46 @@
+ir_version: 7
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "size"
+    output: "y"
+    op_type: "BlackmanWindow"
+    attribute {
+      name: "output_datatype"
+      i: 1 # Use 1 for f32
+      type: INT
+    }
+    attribute {
+      name: "periodic"
+      i: 0 # Set to 1 for periodic, 0 for non-periodic
+      type: INT
+    }
+  }
+  name: "test_blackmanwindow_symmetric"
+  input {
+    name: "size"
+    type {
+      tensor_type {
+        elem_type: 7 # INT64
+        shape {
+        }
+      }
+    }
+  }
+  output {
+    name: "y"
+    type {
+      tensor_type {
+        elem_type: 1 # FLOAT
+        shape {
+          dim {
+            dim_value: 10 # Expected output length for the size=10 test input
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 17
+}
diff --git a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt
new file mode 100644
index 00000000000000..2bf75ed29fe7f6
--- /dev/null
+++ b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt
@@ -0,0 +1,46 @@
+ir_version: 7
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "size"
+    output: "y"
+    op_type: "HammingWindow"
+    attribute {
+      name: "output_datatype"
+      i: 1 # Use 1 for f32
+      type: INT
+    }
+    attribute {
+      name: "periodic"
+      i: 1 # Set to 1 for periodic, 0 for non-periodic
+      type: INT
+    }
+  }
+  name: "test_hammingwindow_periodic"
+  input {
+    name: "size"
+    type {
+      tensor_type {
+        elem_type: 7 # INT64
+        shape {
+        }
+      }
+    }
+  }
+  output {
+    name: "y"
+    type {
+      tensor_type {
+        elem_type: 1 # FLOAT
+        shape {
+          dim {
+            dim_value: 10 # Expected output length for the size=10 test input
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 17
+}
diff --git a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt
new file mode 100644
index 00000000000000..1c9a9019829383
--- /dev/null
+++ b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt
@@ -0,0 +1,46 @@
+ir_version: 7
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "size"
+    output: "y"
+    op_type: "HammingWindow"
+    attribute {
+      name: "output_datatype"
+      i: 1 # Use 1 for f32
+      type: INT
+    }
+    attribute {
+      name: "periodic"
+      i: 0 # Set to 0 for symmetric, 1 for periodic
+      type: INT
+    }
+  }
+  name: "test_hammingwindow_symmetric"
+  input {
+    name: "size"
+    type {
+      tensor_type {
+        elem_type: 7 # INT64
+        shape {
+        }
+      }
+    }
+  }
+  output {
+    name: "y"
+    type {
+      tensor_type {
+        elem_type: 1 # FLOAT
+        shape {
+          dim {
+            dim_value: 10 # Expected output length for the size=10 test input
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 17
+}
diff --git a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt
new file mode 100644
index 00000000000000..2895bf5ad9b4d9
--- /dev/null
+++ b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt
@@ -0,0 +1,46 @@
+ir_version: 7
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "size"
+    output: "y"
+    op_type: "HannWindow"
+    attribute {
+      name: "output_datatype"
+      i: 1 # Use 1 for f32
+      type: INT
+    }
+    attribute {
+      name: "periodic"
+      i: 1 # Set to 1 for periodic, 0 for non-periodic
+      type: INT
+    }
+  }
+  name: "test_hannwindow_periodic"
+  input {
+    name: "size"
+    type {
+      tensor_type {
+        elem_type: 7 # INT64
+        shape {
+        }
+      }
+    }
+  }
+  output {
+    name: "y"
+    type {
+      tensor_type {
+        elem_type: 1 # FLOAT
+        shape {
+          dim {
+            dim_value: 10 # Expected output length for the size=10 test input
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 17
+}
diff --git a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt
new file mode 100644
index 00000000000000..ec2bc2b8e42bef
--- /dev/null
+++ b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt
@@ -0,0 +1,46 @@
+ir_version: 7
+producer_name: "nGraph ONNX Importer"
+graph {
+  node {
+    input: "size"
+    output: "y"
+    op_type: "HannWindow"
+    attribute {
+      name: "output_datatype"
+      i: 1 # Use 1 for f32
+      type: INT
+    }
+    attribute {
+      name: "periodic"
+      i: 0 # Set to 0 for symmetric, 1 for periodic
+      type: INT
+    }
+  }
+  name: "test_hannwindow_symmetric"
+  input {
+    name: "size"
+    type {
+      tensor_type {
+        elem_type: 7 # INT64
+        shape {
+        }
+      }
+    }
+  }
+  output {
+    name: "y"
+    type {
+      tensor_type {
+        elem_type: 1 # FLOAT
+        shape {
+          dim {
+            dim_value: 10 # Expected output length for the size=10 test input
+          }
+        }
+      }
+    }
+  }
+}
+opset_import {
+  version: 17
+}
diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp
index a442160ed2379c..361805e45cf0d4 100644
--- a/src/frontends/onnx/tests/onnx_import.in.cpp
+++ b/src/frontends/onnx/tests/onnx_import.in.cpp
@@ -6716,3 +6716,171 @@
 
     test_case.run();
 }
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_periodic) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/blackmanwindow_periodic.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+
+    test_case.add_input<int64_t>({10});
+    test_case.add_expected_output<float>(Shape{10},
+                                         {-0.000000014901161f,
+                                          0.040212844f,
+                                          0.20077012f,
+                                          0.50978714f,
+                                          0.8492299f,
+                                          0.99999994f,
+                                          0.84922975f,
+                                          0.5097869f,
+                                          0.20077008f,
+                                          0.040212862f});
+
+    // GPU has an accuracy drop, need to use different tolerance
+    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
+        test_case.run_with_tolerance_as_fp();
+    } else {
+        test_case.run_with_tolerance_as_fp(0.01f);
+    }
+}
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_blackmanwindow_symmetric) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/blackmanwindow_symmetric.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+
+    test_case.add_input<int64_t>({10});
+    test_case.add_expected_output<float>(Shape{10},
+                                         {-0.00000001f,
+                                          0.05086961f,
+                                          0.25800052f,
+                                          0.63000000f,
+                                          0.95112991f,
+                                          0.95112979f,
+                                          0.62999994f,
+                                          0.25800028f,
+                                          0.05086958f,
+                                          -0.00000001f});
+
+    // GPU has an accuracy drop, need to use different tolerance
+    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
+        test_case.run_with_tolerance_as_fp();
+    } else {
+        test_case.run_with_tolerance_as_fp(0.01f);
+    }
+}
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_periodic) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/hammingwindow_periodic.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+
+    test_case.add_input<int64_t>({10});
+    test_case.add_expected_output<float>(Shape{10},
+                                         {0.08695650f,
+                                          0.17414439f,
+                                          0.40240526f,
+                                          0.68455124f,
+                                          0.91281211f,
+                                          1.00000000f,
+                                          0.91281211f,
+                                          0.68455112f,
+                                          0.40240520f,
+                                          0.17414442f});
+
+    // GPU has an accuracy drop, need to use different tolerance
+    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
+        test_case.run_with_tolerance_as_fp();
+    } else {
+        test_case.run_with_tolerance_as_fp(0.01f);
+    }
+}
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hammingwindow_symmetric) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/hammingwindow_symmetric.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+
+    test_case.add_input<int64_t>({10});
+    test_case.add_expected_output<float>(Shape{10},
+                                         {0.08695650f,
+                                          0.19376230f,
+                                          0.46420413f,
+                                          0.77173913f,
+                                          0.97246838f,
+                                          0.97246838f,
+                                          0.77173907f,
+                                          0.46420389f,
+                                          0.19376221f,
+                                          0.08695650f});
+
+    // GPU has an accuracy drop, need to use different tolerance
+    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
+        test_case.run_with_tolerance_as_fp();
+    } else {
+        test_case.run_with_tolerance_as_fp(0.01f);
+    }
+}
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_periodic) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/hannwindow_periodic.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+
+    test_case.add_input<int64_t>({10});
+    test_case.add_expected_output<float>(Shape{10},
+                                         {0.00000000f,
+                                          0.09549150f,
+                                          0.34549153f,
+                                          0.65450853f,
+                                          0.90450847f,
+                                          1.00000000f,
+                                          0.90450847f,
+                                          0.65450835f,
+                                          0.34549144f,
+                                          0.09549153f});
+
+    // GPU has an accuracy drop, need to use different tolerance
+    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
+        test_case.run_with_tolerance_as_fp();
+    } else {
+        test_case.run_with_tolerance_as_fp(0.01f);
+    }
+}
+
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_hannwindow_symmetric) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/hannwindow_symmetric.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+
+    test_case.add_input<int64_t>({10});
+    test_case.add_expected_output<float>(Shape{10},
+                                         {0.00000000f,
+                                          0.11697778f,
+                                          0.41317594f,
+                                          0.75000000f,
+                                          0.96984637f,
+                                          0.96984625f,
+                                          0.74999994f,
+                                          0.41317570f,
+                                          0.11697769f,
+                                          0.00000000f});
+
+    // GPU has an accuracy drop, need to use different tolerance
+    if (std::string("${BACKEND_NAME}") != std::string("IE_GPU")) {
+        test_case.run_with_tolerance_as_fp();
+    } else {
+        test_case.run_with_tolerance_as_fp(0.01f);
+    }
+}
diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py
index d1ef686bdd4124..21db0db884d792 100644
--- a/src/frontends/onnx/tests/tests_python/test_backend.py
+++ b/src/frontends/onnx/tests/tests_python/test_backend.py
@@ -376,12 +376,6 @@ def expect_fail(test_case_path, xfail):  # type: (str) -> None
         ),
         (
             xfail_issue_90649,
-            "OnnxBackendNodeModelTest.test_blackmanwindow_cpu",
-            "OnnxBackendNodeModelTest.test_blackmanwindow_symmetric_cpu",
-            "OnnxBackendNodeModelTest.test_hammingwindow_cpu",
-            "OnnxBackendNodeModelTest.test_hammingwindow_symmetric_cpu",
-            "OnnxBackendNodeModelTest.test_hannwindow_cpu",
-            "OnnxBackendNodeModelTest.test_hannwindow_symmetric_cpu",
            "OnnxBackendNodeModelTest.test_melweightmatrix_cpu",
            "OnnxBackendNodeModelTest.test_sequence_map_add_1_sequence_1_tensor_cpu",
            "OnnxBackendNodeModelTest.test_sequence_map_add_2_sequences_cpu",