From c65927dcaa54c4514800d40a6ec99162a6d81257 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 22 Feb 2024 06:08:36 +0530 Subject: [PATCH 01/47] Create atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 1 + 1 file changed, 1 insertion(+) create mode 100644 src/frontends/pytorch/src/op/atan2.cpp diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp new file mode 100644 index 00000000000000..8b137891791fe9 --- /dev/null +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -0,0 +1 @@ + From 38a38915bacace5f801426d0702e9b3f23239f3d Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 22 Feb 2024 06:17:13 +0530 Subject: [PATCH 02/47] Update op_table.cpp --- src/frontends/pytorch/src/op_table.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 8d91951ce6e775..c87edcd91d27bb 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -40,6 +40,7 @@ OP_CONVERTER(translate_argmax); OP_CONVERTER(translate_argmin); OP_CONVERTER(translate_as_strided); OP_CONVERTER(translate_as_tensor); +OP_CONVERTER(translate_atan2); OP_CONVERTER(translate_avg_poolnd); OP_CONVERTER(translate_bool); OP_CONVERTER(translate_batch_norm); @@ -332,6 +333,7 @@ const std::map get_supported_ops_ts() { {"aten::asinh_", op::inplace_op>}, {"aten::atan", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, {"aten::atan_", op::inplace_op>}, + {"aten::atan2", op::translate_atan2}, {"aten::atanh", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, {"aten::atanh_", op::inplace_op>}, {"aten::avg_pool1d", op::quantizable_op}, From 9d4141f0839e252faec763120f29dce604b6d8ee Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 22 Feb 2024 06:23:47 +0530 Subject: [PATCH 03/47] Create test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 1 + 1 file changed, 1 insertion(+) create mode 100644 tests/layer_tests/pytorch_tests/test_atan2.py diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py new file mode 100644 index 00000000000000..8b137891791fe9 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -0,0 +1 @@ + From ff54b63ee48d03f6522006fe7841ccbdeafc1447 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 22 Feb 2024 06:52:19 +0530 Subject: [PATCH 04/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 71 ++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 8b137891791fe9..294a9eca6ff6ee 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -1 +1,72 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +#include "common_op_table.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/atan.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/greater_eq.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/select.hpp" +#include "openvino/op/subtract.hpp" + +using namespace std; 
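+// Overview of the decomposition implemented below (matching the per-case comments in
+// translate_atan2_op): start from atan(y / x), then correct by the signs of the inputs:
+// add pi when x < 0 && y >= 0, subtract pi when x < 0 && y < 0, and fall back to pi
+// divided by +/-2 when x == 0. pi itself is materialized as std::atan(1.0) * 4.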
+using namespace ov::op; + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_atan2_op(const NodeContext& node) { + default_op_checks(node, 2, {"Atan2"}); + auto y = node.get_input(0); + auto x = node.get_input(1); + + // handle the first condition : x>0 + auto div_y_x = make_shared(y, x); + auto atan = make_shared(div_y_x); + auto const_zero = create_same_type_const_scalar(x, 0); + auto result = atan->output(0); + + // handle the second condition : x<0 && y>=0 + auto const_pi = create_same_type_const_scalar(x, std::atan(1.0) * 4); + auto is_x_negative = make_shared(x, const_zero); + auto y_non_negative = make_shared(y, const_zero); + auto cond1 = make_shared(is_x_negative, y_non_negative); + auto atan_y_x_plus_pi = make_shared(atan, const_pi); + result = make_shared(cond1, atan_y_x_plus_pi, result); + + // handle the third condition : x<0 && y<0 + auto is_y_negative = make_shared(y, const_zero); + auto cond2 = make_shared(is_x_negative, is_y_negative); + auto atan_y_x_minus_pi = make_shared(atan, const_pi); + result = make_shared(cond2, atan_y_x_minus_pi, result); + + // handle the fourth condition : x=0 && y>0 + auto is_x_zero = make_shared(x, const_zero); + auto is_y_positive = make_shared(y, const_zero); + auto cond3 = make_shared(is_x_zero, is_y_positive); + auto const_two = create_same_type_const_scalar(x, 2); + auto pi_div_two = make_shared(const_pi, const_two); + result = make_shared(cond3, pi_div_two, result); + + // handle the fifth condition : x=0 && y<0 + auto cond4 = make_shared(is_x_zero, is_y_negative); + auto const_minus_two = create_same_type_const_scalar(x, -2); + auto pi_div_minus_two = make_shared(const_pi, const_minus_two); + result = make_shared(cond4, pi_div_two, result); + + set_node_name(node.get_name(), result.get_node_shared_ptr()); + return {result}; +} +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov From a80c3d8dd9300caa3a025d2458c85e1d0720f2a3 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 22 Feb 2024 07:36:58 +0530 Subject: [PATCH 05/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 8b137891791fe9..4c5ed9e3a3bded 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -1 +1,46 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +import numpy as np +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + + +class TestAtan2(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'y' in inputs_info + assert 'x' in inputs_info + y_shape = inputs_info['y'] + x_shape = inputs_info['x'] + inputs_data = {} + inputs_data['y'] = np.random.rand(*y_shape).astype(self.input_type) - np.random.rand(*y_shape).astype(self.input_type) + inputs_data['x'] = np.random.rand(*x_shape).astype(self.input_type) - np.random.rand(*x_shape).astype(self.input_type) + return inputs_data + + def create_atan2_net(self, input_shape, input_type): + self.input_type = input_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + y = tf.compat.v1.placeholder(input_type, input_shape, 'y') + x = tf.compat.v1.placeholder(input_type, input_shape, 'x') + 
tf.raw_ops.Atan2(y=y, x=x) + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + return tf_net, None + + test_data_basic = [ + dict(input_shape=[1, 2], input_type=np.float32), + dict(input_shape=[2, 3, 4], input_type=np.float32), + ] + + @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + def test_atan2_basic(self, params, ie_device, precision, ir_version, temp_dir, + use_new_frontend, use_old_api): + self._test(*self.create_atan2_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) From 9f435bbc210e0f65af2369202ab7b167bfb8a3d5 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 22 Feb 2024 08:10:09 +0530 Subject: [PATCH 06/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 4c5ed9e3a3bded..14e1d0a7d4726d 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -3,11 +3,11 @@ import numpy as np import pytest -import tensorflow as tf -from common.tf_layer_test_class import CommonTFLayerTest +import pytorch +from pytorch_layer_test_class import PytorchLayerTest -class TestAtan2(CommonTFLayerTest): +class TestAtan2(PytorchLayerTest): def _prepare_input(self, inputs_info): assert 'y' in inputs_info assert 'x' in inputs_info From 3b9fd299baa44c82ca474399b795553e5eef40e6 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 3 Mar 2024 08:14:51 +0530 Subject: [PATCH 07/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 294a9eca6ff6ee..4b494a4f4f3dce 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -59,7 +59,7 @@ OutputVector translate_atan2_op(const NodeContext& node) { // handle the fifth condition : x=0 && y<0 auto cond4 = make_shared(is_x_zero, is_y_negative); - auto const_minus_two = create_same_type_const_scalar(x, -2); + auto const_minus_two = ov::op::v0::Constant::create(element::i32, Shape{}, {-2}); auto pi_div_minus_two = make_shared(const_pi, const_minus_two); result = make_shared(cond4, pi_div_two, result); From 36fcd1ddb390bf5c4d539907c2fd878465ad27a3 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 3 Mar 2024 09:14:44 +0530 Subject: [PATCH 08/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 4b494a4f4f3dce..8ec0d8634d11d4 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 #include "common_op_table.hpp" From 004439f2cae70456b3c87757f3d3bb6c507f31c1 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 3 Mar 2024 11:10:47 +0530 Subject: [PATCH 09/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 51 ++++++++----------- 1 file changed, 
20 insertions(+), 31 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 14e1d0a7d4726d..240522f052ff49 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -1,13 +1,5 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest -import pytorch -from pytorch_layer_test_class import PytorchLayerTest - - class TestAtan2(PytorchLayerTest): + class TestAtan2(PytorchLayerTest): def _prepare_input(self, inputs_info): assert 'y' in inputs_info assert 'x' in inputs_info @@ -18,29 +10,26 @@ def _prepare_input(self, inputs_info): inputs_data['x'] = np.random.rand(*x_shape).astype(self.input_type) - np.random.rand(*x_shape).astype(self.input_type) return inputs_data - def create_atan2_net(self, input_shape, input_type): - self.input_type = input_type - tf.compat.v1.reset_default_graph() - # Create the graph and model - with tf.compat.v1.Session() as sess: - y = tf.compat.v1.placeholder(input_type, input_shape, 'y') - x = tf.compat.v1.placeholder(input_type, input_shape, 'x') - tf.raw_ops.Atan2(y=y, x=x) - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def + def create_model(self, input_type): + class aten_atan2(torch.nn.Module): + def __init__(self): + super().__init__() + self.input_type = input_type + + def forward(self, input_tensor, other_tensor): + return torch.atan2(input_tensor.to(self.input_type), other_tensor.to(self.input_type)) - return tf_net, None + ref_net = None - test_data_basic = [ - dict(input_shape=[1, 2], input_type=np.float32), - dict(input_shape=[2, 3, 4], input_type=np.float32), - ] + return aten_atan2(), ref_net, "aten::atan2" - @pytest.mark.parametrize("params", test_data_basic) - @pytest.mark.precommit_tf_fe + @pytest.mark.parametrize(("input_type"), [ + (torch.float16), + (torch.int32), + (torch.float64), + (torch.float32), + ]) @pytest.mark.nightly - def test_atan2_basic(self, params, ie_device, precision, ir_version, temp_dir, - use_new_frontend, use_old_api): - self._test(*self.create_atan2_net(**params), - ie_device, precision, ir_version, temp_dir=temp_dir, - use_new_frontend=use_new_frontend, use_old_api=use_old_api) + @pytest.mark.precommit + def test_atan2(self, input_type, ie_device, precision, ir_version): + self._test(*self.create_model(input_type), ie_device, precision, ir_version, use_convert_model=True) From c5941bbe860aab2695874b765e3c5e28a25d7361 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 3 Mar 2024 11:15:40 +0530 Subject: [PATCH 10/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 8ec0d8634d11d4..8db622206ae5a9 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -1,7 +1,6 @@ // Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include "common_op_table.hpp" #include "openvino/op/add.hpp" #include "openvino/op/atan.hpp" #include "openvino/op/constant.hpp" @@ -53,13 +52,13 @@ OutputVector translate_atan2_op(const NodeContext& node) { auto is_x_zero = make_shared(x, const_zero); auto is_y_positive = make_shared(y, const_zero); auto cond3 = make_shared(is_x_zero, is_y_positive); - auto const_two = create_same_type_const_scalar(x, 2); + auto const_two = 
v0::Constant::create(element::i32, Shape{}, {2}); auto pi_div_two = make_shared(const_pi, const_two); result = make_shared(cond3, pi_div_two, result); // handle the fifth condition : x=0 && y<0 auto cond4 = make_shared(is_x_zero, is_y_negative); - auto const_minus_two = ov::op::v0::Constant::create(element::i32, Shape{}, {-2}); + auto const_minus_two = v0::Constant::create(element::i32, Shape{}, {-2}); auto pi_div_minus_two = make_shared(const_pi, const_minus_two); result = make_shared(cond4, pi_div_two, result); From 3c110e32cf06e5c3ca250637db99d0d690e3cb8e Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 3 Mar 2024 11:40:37 +0530 Subject: [PATCH 11/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 240522f052ff49..f3e2b9846dc173 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -1,3 +1,13 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0i + +import pytest +import torch +import math +import numpy as np + +from pytorch_layer_test_class import PytorchLayerTest + class TestAtan2(PytorchLayerTest): class TestAtan2(PytorchLayerTest): def _prepare_input(self, inputs_info): From 05c11cb8b32f36a8cda38dad2a072e9aee7d251d Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 3 Mar 2024 11:41:14 +0530 Subject: [PATCH 12/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index f3e2b9846dc173..4364bb57f5d67b 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0i import pytest From 7c79090d5775cd94f92da0d236baba5b2030a7e7 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 5 Mar 2024 22:02:21 +0530 Subject: [PATCH 13/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 8db622206ae5a9..e0541a97eb0aec 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 +#include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/add.hpp" #include "openvino/op/atan.hpp" #include "openvino/op/constant.hpp" @@ -23,10 +24,10 @@ namespace frontend { namespace pytorch { namespace op { -OutputVector translate_atan2_op(const NodeContext& node) { - default_op_checks(node, 2, {"Atan2"}); - auto y = node.get_input(0); - auto x = node.get_input(1); +OutputVector translate_atan2_op(const NodeContext& context) { + default_op_checks(context, 2, {"Atan2"}); + auto y = context.get_input(0); + auto x = context.get_input(1); // handle the first condition : x>0 auto div_y_x = make_shared(y, x); @@ -62,7 +63,7 @@ OutputVector translate_atan2_op(const NodeContext& node) { auto pi_div_minus_two = make_shared(const_pi, 
const_minus_two); result = make_shared(cond4, pi_div_two, result); - set_node_name(node.get_name(), result.get_node_shared_ptr()); + set_node_name(context.get_name(), result.get_node_shared_ptr()); return {result}; } } // namespace op From 61097ea1b3a699687af1aec61d17ac71e638695a Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 12 Mar 2024 23:15:31 +0530 Subject: [PATCH 14/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 47 +++++++++++++------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index e0541a97eb0aec..9bdeb8dd8b999b 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -15,6 +15,8 @@ #include "openvino/op/multiply.hpp" #include "openvino/op/select.hpp" #include "openvino/op/subtract.hpp" +#include "pt_framework_node.hpp" +#include "utils.hpp" using namespace std; using namespace ov::op; @@ -25,45 +27,44 @@ namespace pytorch { namespace op { OutputVector translate_atan2_op(const NodeContext& context) { - default_op_checks(context, 2, {"Atan2"}); + num_inputs_check(context, 2, 2); auto y = context.get_input(0); auto x = context.get_input(1); // handle the first condition : x>0 - auto div_y_x = make_shared(y, x); - auto atan = make_shared(div_y_x); - auto const_zero = create_same_type_const_scalar(x, 0); + auto div_y_x = context.mark_node(make_shared(y, x)); + auto atan = context.mark_node(make_shared(div_y_x)); + auto const_zero = v0::Constant::create(element::i32, Shape{}, {0}); auto result = atan->output(0); // handle the second condition : x<0 && y>=0 - auto const_pi = create_same_type_const_scalar(x, std::atan(1.0) * 4); - auto is_x_negative = make_shared(x, const_zero); - auto y_non_negative = make_shared(y, const_zero); - auto cond1 = make_shared(is_x_negative, y_non_negative); - auto atan_y_x_plus_pi = make_shared(atan, const_pi); - result = make_shared(cond1, atan_y_x_plus_pi, result); + auto const_pi = v0::Constant::create(element::i32, Shape{}, {std::atan(1.0)*4}); + auto is_x_negative = context.mark_node(make_shared(x, const_zero)); + auto y_non_negative = context.mark_node(make_shared(y, const_zero)); + auto cond1 = context.mark_node(make_shared(is_x_negative, y_non_negative)); + auto atan_y_x_plus_pi = context.mark_node(make_shared(atan, const_pi)); + result = context.mark_node(make_shared(cond1, atan_y_x_plus_pi, result)); // handle the third condition : x<0 && y<0 - auto is_y_negative = make_shared(y, const_zero); - auto cond2 = make_shared(is_x_negative, is_y_negative); - auto atan_y_x_minus_pi = make_shared(atan, const_pi); - result = make_shared(cond2, atan_y_x_minus_pi, result); + auto is_y_negative = context.mark_node(make_shared(y, const_zero)); + auto cond2 = context.mark_node(make_shared(is_x_negative, is_y_negative)); + auto atan_y_x_minus_pi = context.mark_node(make_shared(atan, const_pi)); + result = context.mark_node(make_shared(cond2, atan_y_x_minus_pi, result)); // handle the fourth condition : x=0 && y>0 - auto is_x_zero = make_shared(x, const_zero); - auto is_y_positive = make_shared(y, const_zero); - auto cond3 = make_shared(is_x_zero, is_y_positive); + auto is_x_zero = context.mark_node(make_shared(x, const_zero)); + auto is_y_positive = context.mark_node(make_shared(y, const_zero)); + auto cond3 = context.mark_node(make_shared(is_x_zero, is_y_positive)); auto const_two = v0::Constant::create(element::i32, Shape{}, {2}); - auto pi_div_two = 
make_shared(const_pi, const_two); - result = make_shared(cond3, pi_div_two, result); + auto pi_div_two = context.mark_node(make_shared(const_pi, const_two)); + result = context.mark_node(make_shared(cond3, pi_div_two, result)); // handle the fifth condition : x=0 && y<0 - auto cond4 = make_shared(is_x_zero, is_y_negative); + auto cond4 = context.mark_node(make_shared(is_x_zero, is_y_negative)); auto const_minus_two = v0::Constant::create(element::i32, Shape{}, {-2}); - auto pi_div_minus_two = make_shared(const_pi, const_minus_two); - result = make_shared(cond4, pi_div_two, result); + auto pi_div_minus_two = context.mark_node(make_shared(const_pi, const_minus_two)); + result = context.mark_node(make_shared(cond4, pi_div_two, result)); - set_node_name(context.get_name(), result.get_node_shared_ptr()); return {result}; } } // namespace op From d15d1a087cdda4d4ce6730613c98e3ef9b2da489 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Wed, 13 Mar 2024 20:16:10 +0530 Subject: [PATCH 15/47] Update src/frontends/pytorch/src/op_table.cpp Co-authored-by: Maxim Vafin --- src/frontends/pytorch/src/op_table.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index d0d9d16cf28912..01165447da7f82 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -343,7 +343,6 @@ const std::map get_supported_ops_ts() { {"aten::atan", op::optional_out, 1>}, {"aten::atan_", op::inplace_op>}, {"aten::atan2", op::translate_atan2_op}, - {"aten::atanh", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, {"aten::atanh", op::optional_out, 1>}, {"aten::atanh_", op::inplace_op>}, From d0da16f8161b3f7f99d14bfd7e0eaf36238c140f Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Wed, 13 Mar 2024 20:17:01 +0530 Subject: [PATCH 16/47] Update tests/layer_tests/pytorch_tests/test_atan2.py Co-authored-by: Mateusz Mikolajczyk --- tests/layer_tests/pytorch_tests/test_atan2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 4364bb57f5d67b..59997ae17487e9 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -9,7 +9,6 @@ from pytorch_layer_test_class import PytorchLayerTest class TestAtan2(PytorchLayerTest): - class TestAtan2(PytorchLayerTest): def _prepare_input(self, inputs_info): assert 'y' in inputs_info assert 'x' in inputs_info From c15ca8067b96448813e0e788d014422854377348 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Wed, 13 Mar 2024 20:23:31 +0530 Subject: [PATCH 17/47] Update op_table.cpp --- src/frontends/pytorch/src/op_table.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 01165447da7f82..2921000c1e0971 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -342,7 +342,7 @@ const std::map get_supported_ops_ts() { {"aten::asinh_", op::inplace_op>}, {"aten::atan", op::optional_out, 1>}, {"aten::atan_", op::inplace_op>}, - {"aten::atan2", op::translate_atan2_op}, + {"aten::atan2", op::translate_atan2}, {"aten::atanh", op::optional_out, 1>}, {"aten::atanh_", op::inplace_op>}, From 14f2a91f05d2c87c4de7bdef9de42d6b365ec431 Mon Sep 17 00:00:00 2001 From: rghvsh 
<116428320+rghvsh@users.noreply.github.com> Date: Wed, 13 Mar 2024 20:24:37 +0530 Subject: [PATCH 18/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 9bdeb8dd8b999b..5bde3e283474ac 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -26,7 +26,7 @@ namespace frontend { namespace pytorch { namespace op { -OutputVector translate_atan2_op(const NodeContext& context) { +OutputVector translate_atan2(const NodeContext& context) { num_inputs_check(context, 2, 2); auto y = context.get_input(0); auto x = context.get_input(1); From c48f56d0a10146e037b5354c17d37c278a2de215 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Wed, 13 Mar 2024 21:52:06 +0530 Subject: [PATCH 19/47] Update atan2.cpp Remove using namespace std --- src/frontends/pytorch/src/op/atan2.cpp | 39 +++++++++++++------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 5bde3e283474ac..6e1639f7fa0f52 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -18,7 +18,6 @@ #include "pt_framework_node.hpp" #include "utils.hpp" -using namespace std; using namespace ov::op; namespace ov { @@ -32,38 +31,38 @@ OutputVector translate_atan2(const NodeContext& context) { auto x = context.get_input(1); // handle the first condition : x>0 - auto div_y_x = context.mark_node(make_shared(y, x)); - auto atan = context.mark_node(make_shared(div_y_x)); + auto div_y_x = context.mark_node(std::make_shared(y, x)); + auto atan = context.mark_node(std::make_shared(div_y_x)); auto const_zero = v0::Constant::create(element::i32, Shape{}, {0}); auto result = atan->output(0); // handle the second condition : x<0 && y>=0 auto const_pi = v0::Constant::create(element::i32, Shape{}, {std::atan(1.0)*4}); - auto is_x_negative = context.mark_node(make_shared(x, const_zero)); - auto y_non_negative = context.mark_node(make_shared(y, const_zero)); - auto cond1 = context.mark_node(make_shared(is_x_negative, y_non_negative)); - auto atan_y_x_plus_pi = context.mark_node(make_shared(atan, const_pi)); - result = context.mark_node(make_shared(cond1, atan_y_x_plus_pi, result)); + auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); + auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); + auto cond1 = context.mark_node(std::make_shared(is_x_negative, y_non_negative)); + auto atan_y_x_plus_pi = context.mark_node(std::make_shared(atan, const_pi)); + result = context.mark_node(std::make_shared(cond1, atan_y_x_plus_pi, result)); // handle the third condition : x<0 && y<0 - auto is_y_negative = context.mark_node(make_shared(y, const_zero)); - auto cond2 = context.mark_node(make_shared(is_x_negative, is_y_negative)); - auto atan_y_x_minus_pi = context.mark_node(make_shared(atan, const_pi)); - result = context.mark_node(make_shared(cond2, atan_y_x_minus_pi, result)); + auto is_y_negative = context.mark_node(std::make_shared(y, const_zero)); + auto cond2 = context.mark_node(std::make_shared(is_x_negative, is_y_negative)); + auto atan_y_x_minus_pi = context.mark_node(std::make_shared(atan, const_pi)); + result = context.mark_node(std::make_shared(cond2, atan_y_x_minus_pi, result)); // handle the fourth condition : x=0 && y>0 - auto is_x_zero = 
context.mark_node(make_shared(x, const_zero)); - auto is_y_positive = context.mark_node(make_shared(y, const_zero)); - auto cond3 = context.mark_node(make_shared(is_x_zero, is_y_positive)); + auto is_x_zero = context.mark_node(std::make_shared(x, const_zero)); + auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); + auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); auto const_two = v0::Constant::create(element::i32, Shape{}, {2}); - auto pi_div_two = context.mark_node(make_shared(const_pi, const_two)); - result = context.mark_node(make_shared(cond3, pi_div_two, result)); + auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); + result = context.mark_node(std::make_shared(cond3, pi_div_two, result)); // handle the fifth condition : x=0 && y<0 - auto cond4 = context.mark_node(make_shared(is_x_zero, is_y_negative)); + auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); auto const_minus_two = v0::Constant::create(element::i32, Shape{}, {-2}); - auto pi_div_minus_two = context.mark_node(make_shared(const_pi, const_minus_two)); - result = context.mark_node(make_shared(cond4, pi_div_two, result)); + auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); + result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); return {result}; } From c7d5c39fb26de1c684d554501411a719ceb54d1f Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 14 Mar 2024 00:01:54 +0530 Subject: [PATCH 20/47] Update test_atan2.py Remove inputs_info create tensor --- tests/layer_tests/pytorch_tests/test_atan2.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 59997ae17487e9..1f90193fd1816a 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -9,14 +9,10 @@ from pytorch_layer_test_class import PytorchLayerTest class TestAtan2(PytorchLayerTest): - def _prepare_input(self, inputs_info): - assert 'y' in inputs_info - assert 'x' in inputs_info - y_shape = inputs_info['y'] - x_shape = inputs_info['x'] + def _prepare_input(self, x, y, dtype=None): inputs_data = {} - inputs_data['y'] = np.random.rand(*y_shape).astype(self.input_type) - np.random.rand(*y_shape).astype(self.input_type) - inputs_data['x'] = np.random.rand(*x_shape).astype(self.input_type) - np.random.rand(*x_shape).astype(self.input_type) + inputs_data['y'] = np.array(y).astype(dtype) - np.array(y).astype(dtype) + inputs_data['x'] = np.array(x).astype(dtype) - np.array(x).astype(dtype) return inputs_data def create_model(self, input_type): From 0c6932f77c1897066cd33200fc947ec2dfb31b40 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 14 Mar 2024 00:17:02 +0530 Subject: [PATCH 21/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 6e1639f7fa0f52..2fe0a2fdcaf24a 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -26,7 +26,7 @@ namespace pytorch { namespace op { OutputVector translate_atan2(const NodeContext& context) { - num_inputs_check(context, 2, 2); + num_inputs_check(context, 3, 2); auto y = context.get_input(0); auto x = context.get_input(1); @@ -64,6 
+64,8 @@ OutputVector translate_atan2(const NodeContext& context) { auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); + auto result_conv = context.mark_node(std::make_shared(result, context.get_input(2)); + return {result}; } } // namespace op From 82d718f289ee4db661a64049e5c77a4c8b3c2846 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 14 Mar 2024 00:23:17 +0530 Subject: [PATCH 22/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 2fe0a2fdcaf24a..b5d90bd66b4ce6 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -33,11 +33,11 @@ OutputVector translate_atan2(const NodeContext& context) { // handle the first condition : x>0 auto div_y_x = context.mark_node(std::make_shared(y, x)); auto atan = context.mark_node(std::make_shared(div_y_x)); - auto const_zero = v0::Constant::create(element::i32, Shape{}, {0}); + auto const_zero = v0::Constant::create(element::f64, Shape{}, {0}); auto result = atan->output(0); // handle the second condition : x<0 && y>=0 - auto const_pi = v0::Constant::create(element::i32, Shape{}, {std::atan(1.0)*4}); + auto const_pi = v0::Constant::create(element::f64, Shape{}, {std::atan(1.0)*4}); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond1 = context.mark_node(std::make_shared(is_x_negative, y_non_negative)); @@ -54,13 +54,13 @@ OutputVector translate_atan2(const NodeContext& context) { auto is_x_zero = context.mark_node(std::make_shared(x, const_zero)); auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); - auto const_two = v0::Constant::create(element::i32, Shape{}, {2}); + auto const_two = v0::Constant::create(element::f64, Shape{}, {2}); auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); result = context.mark_node(std::make_shared(cond3, pi_div_two, result)); // handle the fifth condition : x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); - auto const_minus_two = v0::Constant::create(element::i32, Shape{}, {-2}); + auto const_minus_two = v0::Constant::create(element::f64, Shape{}, {-2}); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); From 0da1b30fb92c7f9249580a7b38df8db70fa06a90 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 14 Mar 2024 00:30:59 +0530 Subject: [PATCH 23/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index b5d90bd66b4ce6..30ae3d114c840b 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -66,7 +66,7 @@ OutputVector translate_atan2(const NodeContext& context) { auto result_conv = context.mark_node(std::make_shared(result, context.get_input(2)); - return {result}; + return {result_conv}; } } // namespace op } // namespace pytorch From 
eb6bdbe50d6968f536aac9cfa5046f7e1adf4625 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 17 Mar 2024 20:22:22 +0530 Subject: [PATCH 24/47] Update atan2.cpp make input types same --- src/frontends/pytorch/src/op/atan2.cpp | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 30ae3d114c840b..750198663c29ee 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -29,42 +29,52 @@ OutputVector translate_atan2(const NodeContext& context) { num_inputs_check(context, 3, 2); auto y = context.get_input(0); auto x = context.get_input(1); + std::tie(y, x) = get_inputs_with_promoted_types(context, 0, 1); + auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); + align_eltwise_input_types(context, x, dummy_const, false, true); // handle the first condition : x>0 auto div_y_x = context.mark_node(std::make_shared(y, x)); auto atan = context.mark_node(std::make_shared(div_y_x)); - auto const_zero = v0::Constant::create(element::f64, Shape{}, {0}); + auto const_zero = v0::Constant::create(element::f32, Shape{}, {0}); auto result = atan->output(0); // handle the second condition : x<0 && y>=0 - auto const_pi = v0::Constant::create(element::f64, Shape{}, {std::atan(1.0)*4}); + auto const_pi = v0::Constant::create(element::f32, Shape{}, {std::atan(1.0)*4}); + align_eltwise_input_types(context, x, dummy_const, false, true); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); + align_eltwise_input_types(context, y, const_zero, false, true); auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond1 = context.mark_node(std::make_shared(is_x_negative, y_non_negative)); + align_eltwise_input_types(context, atan, const_pi, false, true); auto atan_y_x_plus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond1, atan_y_x_plus_pi, result)); // handle the third condition : x<0 && y<0 + align_eltwise_input_types(context, x, const_zero, false, true); auto is_y_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond2 = context.mark_node(std::make_shared(is_x_negative, is_y_negative)); + align_eltwise_input_types(context, atan, const_pi, false, true); auto atan_y_x_minus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond2, atan_y_x_minus_pi, result)); // handle the fourth condition : x=0 && y>0 + align_eltwise_input_types(context, x, const_zero, false, true); auto is_x_zero = context.mark_node(std::make_shared(x, const_zero)); + align_eltwise_input_types(context, y, const_zero, false, true); auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); - auto const_two = v0::Constant::create(element::f64, Shape{}, {2}); + auto const_two = v0::Constant::create(element::f32, Shape{}, {2}); auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); result = context.mark_node(std::make_shared(cond3, pi_div_two, result)); // handle the fifth condition : x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); - auto const_minus_two = v0::Constant::create(element::f64, Shape{}, {-2}); + auto const_minus_two = v0::Constant::create(element::f32, Shape{}, {-2}); auto 
pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); - auto result_conv = context.mark_node(std::make_shared(result, context.get_input(2)); + auto result_conv = context.mark_node(std::make_shared(result, element::context.get_input(2)); return {result_conv}; } From b72910c57b380567cf1dc3637fee0647e7257f03 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 17 Mar 2024 22:10:26 +0530 Subject: [PATCH 25/47] Update atan2.cpp consider out --- src/frontends/pytorch/src/op/atan2.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 750198663c29ee..f0abcbd2d0a2e5 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -26,7 +26,7 @@ namespace pytorch { namespace op { OutputVector translate_atan2(const NodeContext& context) { - num_inputs_check(context, 3, 2); + num_inputs_check(context, 3, 3); auto y = context.get_input(0); auto x = context.get_input(1); std::tie(y, x) = get_inputs_with_promoted_types(context, 0, 1); @@ -73,10 +73,11 @@ OutputVector translate_atan2(const NodeContext& context) { auto const_minus_two = v0::Constant::create(element::f32, Shape{}, {-2}); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); + + out_tensor = context.get_input(3); + auto result_out = context.mark_node(std::make_shared(result,out_tensor); - auto result_conv = context.mark_node(std::make_shared(result, element::context.get_input(2)); - - return {result_conv}; + return {result_out}; } } // namespace op } // namespace pytorch From c8aa285843877f61f39230de87ac8d52b107447d Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 17 Mar 2024 22:10:46 +0530 Subject: [PATCH 26/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index f0abcbd2d0a2e5..f180996c20cb1f 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -74,7 +74,7 @@ OutputVector translate_atan2(const NodeContext& context) { auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); - out_tensor = context.get_input(3); + out_tensor = context.get_input(2); auto result_out = context.mark_node(std::make_shared(result,out_tensor); return {result_out}; From f7409c7d4efef6f7383733e3acf8f2fc2f5ba415 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 17 Mar 2024 23:09:04 +0530 Subject: [PATCH 27/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 73 +++++++++++++------ 1 file changed, 51 insertions(+), 22 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 1f90193fd1816a..dfa46e4e997d9d 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -9,32 +9,61 @@ from pytorch_layer_test_class import PytorchLayerTest class TestAtan2(PytorchLayerTest): - def _prepare_input(self, x, y, dtype=None): - inputs_data = {} - inputs_data['y'] 
= np.array(y).astype(dtype) - np.array(y).astype(dtype) - inputs_data['x'] = np.array(x).astype(dtype) - np.array(x).astype(dtype) - return inputs_data + def _prepare_input(self, y, x, ref_dtype=None): + inputs = [np.array(y).astype(ref_dtype) - np.array(y).astype(ref_dtype), np.array(x).astype(ref_dtype) - np.array(x).astype(ref_dtype)] + if ref_dtype: + inputs.append(np.zeros(1).astype(ref_dtype)) + return inputs - def create_model(self, input_type): - class aten_atan2(torch.nn.Module): - def __init__(self): + def create_model(self, dtype=None, use_out=False): + dtype_map = { + "float32": torch.float32, + "float64": torch.float64, + "int64": torch.int64, + "int32": torch.int32, + "uint8": torch.uint8, + "int8": torch.int8, + } + + class aten_atan2_out(torch.nn.Module): + def __init__(self, out) -> None: super().__init__() - self.input_type = input_type + self.out = torch.empty(25, dtype=out) + + def forward(self, y, x): + return torch.atan2(input = y, other = x, out=self.out) - def forward(self, input_tensor, other_tensor): - return torch.atan2(input_tensor.to(self.input_type), other_tensor.to(self.input_type)) + class aten_atan2(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + + def forward(self, y, x): + return torch.atan2(input = y, other = x) + + dtype = dtype_map.get(dtype) + if out_use: + model_class = aten_atan2_out(dtype) + else: + model_class = aten_atan2() + + ref_net = None - - return aten_atan2(), ref_net, "aten::atan2" - - @pytest.mark.parametrize(("input_type"), [ - (torch.float16), - (torch.int32), - (torch.float64), - (torch.float32), - ]) + + return model_class, ref_net, "aten::atan2" + @pytest.mark.nightly @pytest.mark.precommit - def test_atan2(self, input_type, ie_device, precision, ir_version): - self._test(*self.create_model(input_type), ie_device, precision, ir_version, use_convert_model=True) + @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8", "uin8"]) + @pytest.mark.parametrize( + "start,end,steps", [(0, 1), (0, 0), (1, -5), (1, 10), (-1, -5), (-1, -5), (1.25, -5.5)] + ) + @pytest.mark.parametrize("use_out", [False, True]) + def test_linspace_with_out(self, dtype, use_out, y, x, ie_device, precision, ir_version): + self._test( + *self.create_model(dtype=dtype, use_out=use_out), + ie_device, + precision, + ir_version, + kwargs_to_prepare_input={"y": y, "x": x} + ) From e2c6c6fa4b07b137ac197dfdec12acc3bad979dd Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sun, 17 Mar 2024 23:43:26 +0530 Subject: [PATCH 28/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index f180996c20cb1f..6ad76ea2ee1262 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -1,10 +1,13 @@ // Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 +// #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/add.hpp" #include "openvino/op/atan.hpp" +#include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" #include "openvino/op/convert_like.hpp" #include "openvino/op/divide.hpp" #include "openvino/op/equal.hpp" @@ -13,6 +16,7 @@ #include "openvino/op/less.hpp" #include "openvino/op/logical_and.hpp" #include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" #include 
"openvino/op/select.hpp" #include "openvino/op/subtract.hpp" #include "pt_framework_node.hpp" @@ -27,6 +31,7 @@ namespace op { OutputVector translate_atan2(const NodeContext& context) { num_inputs_check(context, 3, 3); + // "aten::atan2.out(Tensor input,Tensor other, *,Tensor(a!) out) → Tensor(a!)" auto y = context.get_input(0); auto x = context.get_input(1); std::tie(y, x) = get_inputs_with_promoted_types(context, 0, 1); @@ -34,6 +39,7 @@ OutputVector translate_atan2(const NodeContext& context) { align_eltwise_input_types(context, x, dummy_const, false, true); // handle the first condition : x>0 + align_eltwise_input_types(context, y, x, false, true); auto div_y_x = context.mark_node(std::make_shared(y, x)); auto atan = context.mark_node(std::make_shared(div_y_x)); auto const_zero = v0::Constant::create(element::f32, Shape{}, {0}); @@ -41,7 +47,7 @@ OutputVector translate_atan2(const NodeContext& context) { // handle the second condition : x<0 && y>=0 auto const_pi = v0::Constant::create(element::f32, Shape{}, {std::atan(1.0)*4}); - align_eltwise_input_types(context, x, dummy_const, false, true); + align_eltwise_input_types(context, x, const_pi, false, true); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); align_eltwise_input_types(context, y, const_zero, false, true); auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); @@ -65,19 +71,28 @@ OutputVector translate_atan2(const NodeContext& context) { auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); auto const_two = v0::Constant::create(element::f32, Shape{}, {2}); + align_eltwise_input_types(context, const_pi, const_two, false, true); auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); result = context.mark_node(std::make_shared(cond3, pi_div_two, result)); // handle the fifth condition : x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); auto const_minus_two = v0::Constant::create(element::f32, Shape{}, {-2}); + align_eltwise_input_types(context, const_pi, const_minus_two, false, true); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); - + + // check whether out tensor is given + if(!context.input_is_none(2) && context.get_input_size == 3){ out_tensor = context.get_input(2); + // dtype is inherited from out tensor in input auto result_out = context.mark_node(std::make_shared(result,out_tensor); return {result_out}; + } + + // when out tensor is not in input + return{result}; } } // namespace op } // namespace pytorch From 2cb6863af92d48caefb6d37cfe759563208691b2 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 19 Mar 2024 18:25:35 +0530 Subject: [PATCH 29/47] Update src/frontends/pytorch/src/op/atan2.cpp Co-authored-by: Mateusz Mikolajczyk --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 6ad76ea2ee1262..97bc99edd7da3c 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -30,7 +30,7 @@ namespace pytorch { namespace op { OutputVector translate_atan2(const NodeContext& context) { - num_inputs_check(context, 3, 3); + num_inputs_check(context, 2, 3); // "aten::atan2.out(Tensor input,Tensor other, 
*,Tensor(a!) out) → Tensor(a!)" auto y = context.get_input(0); auto x = context.get_input(1); From 59dc55b40462919125055e703057afb1ad5f973e Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 19 Mar 2024 18:26:27 +0530 Subject: [PATCH 30/47] Update src/frontends/pytorch/src/op/atan2.cpp Co-authored-by: Mateusz Mikolajczyk --- src/frontends/pytorch/src/op/atan2.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 97bc99edd7da3c..fb153ec7d552f6 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -32,8 +32,8 @@ namespace op { OutputVector translate_atan2(const NodeContext& context) { num_inputs_check(context, 2, 3); // "aten::atan2.out(Tensor input,Tensor other, *,Tensor(a!) out) → Tensor(a!)" - auto y = context.get_input(0); - auto x = context.get_input(1); + Output y; + Output x; std::tie(y, x) = get_inputs_with_promoted_types(context, 0, 1); auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); align_eltwise_input_types(context, x, dummy_const, false, true); From 020b5aeb98d3a1cb4d2c2e7a3c574cae8650aabb Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 13:25:56 +0530 Subject: [PATCH 31/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index dfa46e4e997d9d..b98dea844d20b2 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -54,9 +54,9 @@ def forward(self, y, x): @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8", "uin8"]) + @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8", "uint8"]) @pytest.mark.parametrize( - "start,end,steps", [(0, 1), (0, 0), (1, -5), (1, 10), (-1, -5), (-1, -5), (1.25, -5.5)] + "y, x", [(0, 1), (0, 0), (1, -5), (1, 10), (-1, -5), (-1, -5), (1.25, -5.5)] ) @pytest.mark.parametrize("use_out", [False, True]) def test_linspace_with_out(self, dtype, use_out, y, x, ie_device, precision, ir_version): From 4d31d2fdbcf41868f4200a43d261dada535e71ba Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:05:57 +0530 Subject: [PATCH 32/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index fb153ec7d552f6..12df070b0842b2 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -46,7 +46,7 @@ OutputVector translate_atan2(const NodeContext& context) { auto result = atan->output(0); // handle the second condition : x<0 && y>=0 - auto const_pi = v0::Constant::create(element::f32, Shape{}, {std::atan(1.0)*4}); + auto const_pi = v0::Constant::create(element::f32, Shape{}, {std::atan(1.0) * 4}); align_eltwise_input_types(context, x, const_pi, false, true); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); align_eltwise_input_types(context, y, const_zero, false, true); @@ -83,16 +83,16 @@ OutputVector translate_atan2(const NodeContext& 
context) { result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); // check whether out tensor is given - if(!context.input_is_none(2) && context.get_input_size == 3){ - out_tensor = context.get_input(2); - // dtype is inherited from out tensor in input + if(!context.input_is_none(2) && context.get_input_size == 3) { + out_tensor = context.get_input(2); + // dtype is inherited from out tensor in input auto result_out = context.mark_node(std::make_shared(result,out_tensor); return {result_out}; } // when out tensor is not in input - return{result}; + return {result}; } } // namespace op } // namespace pytorch From 5cc47bb8b6adefa27cf051e7a67053c6c9d2f171 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:29:52 +0530 Subject: [PATCH 33/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 12df070b0842b2..7ecf95c8128e89 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -83,10 +83,10 @@ OutputVector translate_atan2(const NodeContext& context) { result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); // check whether out tensor is given - if(!context.input_is_none(2) && context.get_input_size == 3) { - out_tensor = context.get_input(2); + if(!context.input_is_none(2) && context.get_input_size() == 3) { + auto out_tensor = context.get_input(2); // dtype is inherited from out tensor in input - auto result_out = context.mark_node(std::make_shared(result,out_tensor); + auto result_out = context.mark_node(std::make_shared(result, out_tensor); return {result_out}; } From ae81808711ec393b6b03050735adfb739f560915 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:35:06 +0530 Subject: [PATCH 34/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 7ecf95c8128e89..8ec440bf1dfebf 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -86,7 +86,7 @@ OutputVector translate_atan2(const NodeContext& context) { if(!context.input_is_none(2) && context.get_input_size() == 3) { auto out_tensor = context.get_input(2); // dtype is inherited from out tensor in input - auto result_out = context.mark_node(std::make_shared(result, out_tensor); + auto result_out = context.mark_node(std::make_shared(result, out_tensor)); return {result_out}; } From 0aa717498af5cc75bb31e90a6226ded28dde8979 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 16:40:06 +0530 Subject: [PATCH 35/47] Update op_table.cpp --- src/frontends/pytorch/src/op_table.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index f705cace91fc3b..6adcdc81185a01 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -359,7 +359,7 @@ const std::map get_supported_ops_ts() { {"aten::asinh_", op::inplace_op>}, {"aten::atan", op::optional_out, 1>}, {"aten::atan_", op::inplace_op>}, - {"aten::atan2", op::translate_atan2}, + {"aten::atan2", op::optional_out}, {"aten::atanh", op::optional_out, 1>}, {"aten::atanh_", 
op::inplace_op>}, From 24f019870ac87c548a02b55c402ed0384dfb9350 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 19:20:13 +0530 Subject: [PATCH 36/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 34 +++++++++++++------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 8ec440bf1dfebf..f19a6aad89dd6c 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -30,16 +30,18 @@ namespace pytorch { namespace op { OutputVector translate_atan2(const NodeContext& context) { + // Check whether inputs present num_inputs_check(context, 2, 3); // "aten::atan2.out(Tensor input,Tensor other, *,Tensor(a!) out) → Tensor(a!)" Output y; Output x; + // tie inputs together std::tie(y, x) = get_inputs_with_promoted_types(context, 0, 1); auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); + // align input types align_eltwise_input_types(context, x, dummy_const, false, true); // handle the first condition : x>0 - align_eltwise_input_types(context, y, x, false, true); auto div_y_x = context.mark_node(std::make_shared(y, x)); auto atan = context.mark_node(std::make_shared(div_y_x)); auto const_zero = v0::Constant::create(element::f32, Shape{}, {0}); @@ -47,53 +49,51 @@ OutputVector translate_atan2(const NodeContext& context) { // handle the second condition : x<0 && y>=0 auto const_pi = v0::Constant::create(element::f32, Shape{}, {std::atan(1.0) * 4}); - align_eltwise_input_types(context, x, const_pi, false, true); + // Same input type + auto x = context.mark_node(std::make_shared(x, const_pi)); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); - align_eltwise_input_types(context, y, const_zero, false, true); + auto y = context.mark_node(std::make_shared(y, const_zero)); auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond1 = context.mark_node(std::make_shared(is_x_negative, y_non_negative)); - align_eltwise_input_types(context, atan, const_pi, false, true); + auto atan = context.mark_node(std::make_shared(atan, const_pi)); auto atan_y_x_plus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond1, atan_y_x_plus_pi, result)); // handle the third condition : x<0 && y<0 - align_eltwise_input_types(context, x, const_zero, false, true); + auto y = context.mark_node(std::make_shared(y, const_zero)); auto is_y_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond2 = context.mark_node(std::make_shared(is_x_negative, is_y_negative)); - align_eltwise_input_types(context, atan, const_pi, false, true); + auto y = context.mark_node(std::make_shared(atan, const_pi)); auto atan_y_x_minus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond2, atan_y_x_minus_pi, result)); // handle the fourth condition : x=0 && y>0 - align_eltwise_input_types(context, x, const_zero, false, true); + auto x = context.mark_node(std::make_shared(x, const_zero)); auto is_x_zero = context.mark_node(std::make_shared(x, const_zero)); - align_eltwise_input_types(context, y, const_zero, false, true); + auto y = context.mark_node(std::make_shared(y, const_zero)); auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); 
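+    // The +/-pi/2 values for the two x == 0 branches are obtained by dividing
+    // const_pi by the constants 2 and -2 created below.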
auto const_two = v0::Constant::create(element::f32, Shape{}, {2}); - align_eltwise_input_types(context, const_pi, const_two, false, true); + auto const_pi = context.mark_node(std::make_shared(const_pi, const_two)); auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); result = context.mark_node(std::make_shared(cond3, pi_div_two, result)); // handle the fifth condition : x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); auto const_minus_two = v0::Constant::create(element::f32, Shape{}, {-2}); - align_eltwise_input_types(context, const_pi, const_minus_two, false, true); + auto const_minus_two = context.mark_node(std::make_shared(const_minus_two, const_pi)); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); // check whether out tensor is given if(!context.input_is_none(2) && context.get_input_size() == 3) { - auto out_tensor = context.get_input(2); - // dtype is inherited from out tensor in input - auto result_out = context.mark_node(std::make_shared(result, out_tensor)); - - return {result_out}; - } + context.mutate_input(2, result); + } // when out tensor is not in input return {result}; -} +}; + } // namespace op } // namespace pytorch } // namespace frontend From c6c066c25d6db3a50c7646c0ee42f66c32bc8fa6 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 20:44:50 +0530 Subject: [PATCH 37/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index b98dea844d20b2..75111b37b85669 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -9,10 +9,8 @@ from pytorch_layer_test_class import PytorchLayerTest class TestAtan2(PytorchLayerTest): - def _prepare_input(self, y, x, ref_dtype=None): - inputs = [np.array(y).astype(ref_dtype) - np.array(y).astype(ref_dtype), np.array(x).astype(ref_dtype) - np.array(x).astype(ref_dtype)] - if ref_dtype: - inputs.append(np.zeros(1).astype(ref_dtype)) + def _prepare_input(self, y, x, dtype): + inputs = [np.array(y).astype(dtype), np.array(x).astype(dtype)] return inputs def create_model(self, dtype=None, use_out=False): @@ -42,7 +40,7 @@ def forward(self, y, x): dtype = dtype_map.get(dtype) - if out_use: + if use_out: model_class = aten_atan2_out(dtype) else: model_class = aten_atan2() @@ -56,10 +54,10 @@ def forward(self, y, x): @pytest.mark.precommit @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8", "uint8"]) @pytest.mark.parametrize( - "y, x", [(0, 1), (0, 0), (1, -5), (1, 10), (-1, -5), (-1, -5), (1.25, -5.5)] + "y, x", [(0, 1.5), (0, 0), (1.25, -5), (1, 10), (-1, -5.5), (-1, -5), (1.25, -5.5), (1.9, 2.9), [10, 9.9]] ) @pytest.mark.parametrize("use_out", [False, True]) - def test_linspace_with_out(self, dtype, use_out, y, x, ie_device, precision, ir_version): + def test_atan2_with_out(self, dtype, use_out, y, x, ie_device, precision, ir_version): self._test( *self.create_model(dtype=dtype, use_out=use_out), ie_device, From 7d38db0c5ebf38d1aa06a87b4692887c171a60fa Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Sat, 23 Mar 2024 21:03:33 +0530 Subject: [PATCH 38/47] Update atan2.cpp --- 
src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index f19a6aad89dd6c..b81802f657bcdc 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -88,7 +88,7 @@ OutputVector translate_atan2(const NodeContext& context) { // check whether out tensor is given if(!context.input_is_none(2) && context.get_input_size() == 3) { context.mutate_input(2, result); - } + } // when out tensor is not in input return {result}; From 9bc5e27308664fb53e346d7d876ddf2d1730f708 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Mon, 25 Mar 2024 23:28:48 +0530 Subject: [PATCH 39/47] Update atan2.cpp fix build issues and code style --- src/frontends/pytorch/src/op/atan2.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index b81802f657bcdc..8ddd8380ffe100 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -50,50 +50,50 @@ OutputVector translate_atan2(const NodeContext& context) { // handle the second condition : x<0 && y>=0 auto const_pi = v0::Constant::create(element::f32, Shape{}, {std::atan(1.0) * 4}); // Same input type - auto x = context.mark_node(std::make_shared(x, const_pi)); + x = context.mark_node(std::make_shared(x, const_pi)); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); - auto y = context.mark_node(std::make_shared(y, const_zero)); + y = context.mark_node(std::make_shared(y, const_zero)); auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond1 = context.mark_node(std::make_shared(is_x_negative, y_non_negative)); - auto atan = context.mark_node(std::make_shared(atan, const_pi)); + atan = context.mark_node(std::make_shared(atan, const_pi)); auto atan_y_x_plus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond1, atan_y_x_plus_pi, result)); // handle the third condition : x<0 && y<0 - auto y = context.mark_node(std::make_shared(y, const_zero)); + y = context.mark_node(std::make_shared(y, const_zero)); auto is_y_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond2 = context.mark_node(std::make_shared(is_x_negative, is_y_negative)); - auto y = context.mark_node(std::make_shared(atan, const_pi)); + y = context.mark_node(std::make_shared(atan, const_pi)); auto atan_y_x_minus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond2, atan_y_x_minus_pi, result)); // handle the fourth condition : x=0 && y>0 - auto x = context.mark_node(std::make_shared(x, const_zero)); + x = context.mark_node(std::make_shared(x, const_zero)); auto is_x_zero = context.mark_node(std::make_shared(x, const_zero)); - auto y = context.mark_node(std::make_shared(y, const_zero)); + y = context.mark_node(std::make_shared(y, const_zero)); auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); auto const_two = v0::Constant::create(element::f32, Shape{}, {2}); - auto const_pi = context.mark_node(std::make_shared(const_pi, const_two)); + const_pi = context.mark_node(std::make_shared(const_pi, const_two)); auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); result 
= context.mark_node(std::make_shared(cond3, pi_div_two, result)); // handle the fifth condition : x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); auto const_minus_two = v0::Constant::create(element::f32, Shape{}, {-2}); - auto const_minus_two = context.mark_node(std::make_shared(const_minus_two, const_pi)); + const_minus_two = context.mark_node(std::make_shared(const_minus_two, const_pi)); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); // check whether out tensor is given - if(!context.input_is_none(2) && context.get_input_size() == 3) { - context.mutate_input(2, result); + if (!context.input_is_none(2) && context.get_input_size() == 3) { + context.mutate_input(2, result); } // when out tensor is not in input return {result}; }; - + } // namespace op } // namespace pytorch } // namespace frontend From 30f642cb0a3af5eb163503584c6fdc3237bbb05c Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:50:58 +0530 Subject: [PATCH 40/47] Update test_atan2.py add different types for x and y --- tests/layer_tests/pytorch_tests/test_atan2.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 75111b37b85669..a51b3a52140c97 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -9,16 +9,17 @@ from pytorch_layer_test_class import PytorchLayerTest class TestAtan2(PytorchLayerTest): - def _prepare_input(self, y, x, dtype): - inputs = [np.array(y).astype(dtype), np.array(x).astype(dtype)] + def _prepare_input(self, y, x, dtype1=None, dtype2=None): + inputs = [np.array(y).astype(dtype1), np.array(x).astype(dtype2)] return inputs - def create_model(self, dtype=None, use_out=False): + def create_model(self, dtype1=None, dtype2=None use_out=False): dtype_map = { "float32": torch.float32, "float64": torch.float64, "int64": torch.int64, "int32": torch.int32, + "int16": torch.int16, "uint8": torch.uint8, "int8": torch.int8, } @@ -38,10 +39,11 @@ def __init__(self) -> None: def forward(self, y, x): return torch.atan2(input = y, other = x) - dtype = dtype_map.get(dtype) + dtype1 = dtype_map.get(dtype1) + dtype2 = dtype_map.get(dtype2) if use_out: - model_class = aten_atan2_out(dtype) + model_class = aten_atan2_out(dtype1) else: model_class = aten_atan2() @@ -52,12 +54,12 @@ def forward(self, y, x): @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8", "uint8"]) + @pytest.mark.parametrize("dtype1, dtype2", [(None, None), ("float32", "int32"), ("float64", "float64"), ("int32", "float64"), ("int64", "int16"), ("int8", "int8"), ("uint8", "uint8")]) @pytest.mark.parametrize( "y, x", [(0, 1.5), (0, 0), (1.25, -5), (1, 10), (-1, -5.5), (-1, -5), (1.25, -5.5), (1.9, 2.9), [10, 9.9]] ) @pytest.mark.parametrize("use_out", [False, True]) - def test_atan2_with_out(self, dtype, use_out, y, x, ie_device, precision, ir_version): + def test_atan2_with_out(self, dtype1, dtype2, use_out, y, x, ie_device, precision, ir_version): self._test( *self.create_model(dtype=dtype, use_out=use_out), ie_device, From 7a7d79d004060bf3bf568426a2136bd0ca22c12b Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 26 Mar 2024 21:34:47 +0530 
Subject: [PATCH 41/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 50 +++++++++++++------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 8ddd8380ffe100..2f3c1ef7166d8f 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -35,62 +35,62 @@ OutputVector translate_atan2(const NodeContext& context) { // "aten::atan2.out(Tensor input,Tensor other, *,Tensor(a!) out) → Tensor(a!)" Output y; Output x; + // tie inputs together std::tie(y, x) = get_inputs_with_promoted_types(context, 0, 1); auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); - // align input types + + // align input types of dummy_const, x align_eltwise_input_types(context, x, dummy_const, false, true); - // handle the first condition : x>0 + // align input types of y, x + align_eltwise_input_types(context, y, x, false, true); + + // handle the first condition + // x>0 auto div_y_x = context.mark_node(std::make_shared(y, x)); auto atan = context.mark_node(std::make_shared(div_y_x)); - auto const_zero = v0::Constant::create(element::f32, Shape{}, {0}); + auto const_zero = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0})); + const_zero = context.mark_node(std::make_shared(const_zero, x)); auto result = atan->output(0); - // handle the second condition : x<0 && y>=0 - auto const_pi = v0::Constant::create(element::f32, Shape{}, {std::atan(1.0) * 4}); + // handle the second condition + // x<0 && y>=0 + auto const_pi = context.mark_node(v0::Constant::create(element::f32, Shape{}, {std::atan(1.0) * 4})); // Same input type - x = context.mark_node(std::make_shared(x, const_pi)); + const_pi = context.mark_node(std::make_shared(const_pi, x)); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); - y = context.mark_node(std::make_shared(y, const_zero)); auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond1 = context.mark_node(std::make_shared(is_x_negative, y_non_negative)); - atan = context.mark_node(std::make_shared(atan, const_pi)); auto atan_y_x_plus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond1, atan_y_x_plus_pi, result)); - // handle the third condition : x<0 && y<0 - y = context.mark_node(std::make_shared(y, const_zero)); + // handle the third condition + // x<0 && y<0 auto is_y_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond2 = context.mark_node(std::make_shared(is_x_negative, is_y_negative)); - y = context.mark_node(std::make_shared(atan, const_pi)); auto atan_y_x_minus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond2, atan_y_x_minus_pi, result)); - // handle the fourth condition : x=0 && y>0 - x = context.mark_node(std::make_shared(x, const_zero)); + // handle the fourth condition + // x=0 && y>0 auto is_x_zero = context.mark_node(std::make_shared(x, const_zero)); - y = context.mark_node(std::make_shared(y, const_zero)); auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); - auto const_two = v0::Constant::create(element::f32, Shape{}, {2}); - const_pi = context.mark_node(std::make_shared(const_pi, const_two)); + auto const_two = context.mark_node(v0::Constant::create(element::f32, Shape{}, {2})); + 
const_two = context.mark_node(std::make_shared(const_two, x)); + auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); result = context.mark_node(std::make_shared(cond3, pi_div_two, result)); - // handle the fifth condition : x=0 && y<0 + // handle the fifth condition + // x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); - auto const_minus_two = v0::Constant::create(element::f32, Shape{}, {-2}); - const_minus_two = context.mark_node(std::make_shared(const_minus_two, const_pi)); + auto const_minus_two = context.mark_v0::Constant::create(element::f32, Shape{}, {-2}); + const_minus_two = context.mark_node(std::make_shared(const_minus_two, x)); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); - // check whether out tensor is given - if (!context.input_is_none(2) && context.get_input_size() == 3) { - context.mutate_input(2, result); - } - - // when out tensor is not in input return {result}; }; From d0d8d2e584bc6fa18b51d439273ce5aada75e506 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 26 Mar 2024 21:35:22 +0530 Subject: [PATCH 42/47] Update tests/layer_tests/pytorch_tests/test_atan2.py Co-authored-by: Mateusz Mikolajczyk --- tests/layer_tests/pytorch_tests/test_atan2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index a51b3a52140c97..8d5a50bb51d571 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -13,7 +13,7 @@ def _prepare_input(self, y, x, dtype1=None, dtype2=None): inputs = [np.array(y).astype(dtype1), np.array(x).astype(dtype2)] return inputs - def create_model(self, dtype1=None, dtype2=None use_out=False): + def create_model(self, dtype1=None, dtype2=None, use_out=False): dtype_map = { "float32": torch.float32, "float64": torch.float64, From b1d67bffe4d81581455ac0fd9fe43552c913e88e Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Tue, 26 Mar 2024 22:11:39 +0530 Subject: [PATCH 43/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 30 +++++++++++++------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index 2f3c1ef7166d8f..ea9e152f91b3aa 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -33,60 +33,60 @@ OutputVector translate_atan2(const NodeContext& context) { // Check whether inputs present num_inputs_check(context, 2, 3); // "aten::atan2.out(Tensor input,Tensor other, *,Tensor(a!) 
out) → Tensor(a!)" - Output y; - Output x; - // tie inputs together - std::tie(y, x) = get_inputs_with_promoted_types(context, 0, 1); + // get input tensor x and y + Output x = context.get_input(0); + Output y = context.get_input(1); auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); // align input types of dummy_const, x align_eltwise_input_types(context, x, dummy_const, false, true); - // align input types of y, x - align_eltwise_input_types(context, y, x, false, true); + // align input types of y, x + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); - // handle the first condition + // handle the first condition // x>0 auto div_y_x = context.mark_node(std::make_shared(y, x)); auto atan = context.mark_node(std::make_shared(div_y_x)); auto const_zero = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0})); - const_zero = context.mark_node(std::make_shared(const_zero, x)); + const_zero = context.mark_node(std::make_shared(const_zero, x)); auto result = atan->output(0); - // handle the second condition + // handle the second condition // x<0 && y>=0 auto const_pi = context.mark_node(v0::Constant::create(element::f32, Shape{}, {std::atan(1.0) * 4})); // Same input type - const_pi = context.mark_node(std::make_shared(const_pi, x)); + const_pi = context.mark_node(std::make_shared(const_pi, x)); auto is_x_negative = context.mark_node(std::make_shared(x, const_zero)); auto y_non_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond1 = context.mark_node(std::make_shared(is_x_negative, y_non_negative)); auto atan_y_x_plus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond1, atan_y_x_plus_pi, result)); - // handle the third condition + // handle the third condition // x<0 && y<0 auto is_y_negative = context.mark_node(std::make_shared(y, const_zero)); auto cond2 = context.mark_node(std::make_shared(is_x_negative, is_y_negative)); auto atan_y_x_minus_pi = context.mark_node(std::make_shared(atan, const_pi)); result = context.mark_node(std::make_shared(cond2, atan_y_x_minus_pi, result)); - // handle the fourth condition + // handle the fourth condition // x=0 && y>0 auto is_x_zero = context.mark_node(std::make_shared(x, const_zero)); auto is_y_positive = context.mark_node(std::make_shared(y, const_zero)); auto cond3 = context.mark_node(std::make_shared(is_x_zero, is_y_positive)); auto const_two = context.mark_node(v0::Constant::create(element::f32, Shape{}, {2})); + // Same type conversion const_two = context.mark_node(std::make_shared(const_two, x)); - auto pi_div_two = context.mark_node(std::make_shared(const_pi, const_two)); result = context.mark_node(std::make_shared(cond3, pi_div_two, result)); - // handle the fifth condition + // handle the fifth condition // x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); - auto const_minus_two = context.mark_v0::Constant::create(element::f32, Shape{}, {-2}); + auto const_minus_two = context.mark(v0::Constant::create(element::f32, Shape{}, {-2})); + // Same type conversion const_minus_two = context.mark_node(std::make_shared(const_minus_two, x)); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); result = context.mark_node(std::make_shared(cond4, pi_div_two, result)); From 022569bfc209c2966ce76b3c85a2606fc5fe0f71 Mon Sep 17 00:00:00 2001 From: rghvsh 
<116428320+rghvsh@users.noreply.github.com> Date: Tue, 26 Mar 2024 22:20:52 +0530 Subject: [PATCH 44/47] Update atan2.cpp --- src/frontends/pytorch/src/op/atan2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp index ea9e152f91b3aa..54de1521b211c3 100644 --- a/src/frontends/pytorch/src/op/atan2.cpp +++ b/src/frontends/pytorch/src/op/atan2.cpp @@ -85,7 +85,7 @@ OutputVector translate_atan2(const NodeContext& context) { // handle the fifth condition // x=0 && y<0 auto cond4 = context.mark_node(std::make_shared(is_x_zero, is_y_negative)); - auto const_minus_two = context.mark(v0::Constant::create(element::f32, Shape{}, {-2})); + auto const_minus_two = context.mark_node(v0::Constant::create(element::f32, Shape{}, {-2})); // Same type conversion const_minus_two = context.mark_node(std::make_shared(const_minus_two, x)); auto pi_div_minus_two = context.mark_node(std::make_shared(const_pi, const_minus_two)); From a0d2b15491452329b8efb9e0fa0d40aff24ab64d Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Fri, 29 Mar 2024 11:23:05 +0530 Subject: [PATCH 45/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index 8d5a50bb51d571..db988b4a39c9cd 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -25,9 +25,9 @@ def create_model(self, dtype1=None, dtype2=None, use_out=False): } class aten_atan2_out(torch.nn.Module): - def __init__(self, out) -> None: + def __init__(self, dtype) -> None: super().__init__() - self.out = torch.empty(25, dtype=out) + self.out = torch.empty(25, dtype=dtype) def forward(self, y, x): return torch.atan2(input = y, other = x, out=self.out) @@ -61,7 +61,7 @@ def forward(self, y, x): @pytest.mark.parametrize("use_out", [False, True]) def test_atan2_with_out(self, dtype1, dtype2, use_out, y, x, ie_device, precision, ir_version): self._test( - *self.create_model(dtype=dtype, use_out=use_out), + *self.create_model(dtype=dtype1, use_out=use_out), ie_device, precision, ir_version, From fff1cd1feb6ae64d19aeb303af7f3424f2ebb4d6 Mon Sep 17 00:00:00 2001 From: rghvsh <116428320+rghvsh@users.noreply.github.com> Date: Thu, 11 Apr 2024 13:36:10 +0530 Subject: [PATCH 46/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index db988b4a39c9cd..f8574ab6efc68c 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -20,7 +20,7 @@ def create_model(self, dtype1=None, dtype2=None, use_out=False): "int64": torch.int64, "int32": torch.int32, "int16": torch.int16, - "uint8": torch.uint8, + "uint8": torch.uint8, "int8": torch.int8, } @@ -61,7 +61,7 @@ def forward(self, y, x): @pytest.mark.parametrize("use_out", [False, True]) def test_atan2_with_out(self, dtype1, dtype2, use_out, y, x, ie_device, precision, ir_version): self._test( - *self.create_model(dtype=dtype1, use_out=use_out), + *self.create_model(dtype2=dtype2, dtype1=dtype1, use_out=use_out), ie_device, precision, ir_version, From ed9df7634c3db4d7efa99b716caaf72422a8330d Mon Sep 17 00:00:00 2001 From: rghvsh 
<116428320+rghvsh@users.noreply.github.com> Date: Fri, 26 Apr 2024 11:33:13 +0530 Subject: [PATCH 47/47] Update test_atan2.py --- tests/layer_tests/pytorch_tests/test_atan2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py index f8574ab6efc68c..67acbd6f9014e6 100644 --- a/tests/layer_tests/pytorch_tests/test_atan2.py +++ b/tests/layer_tests/pytorch_tests/test_atan2.py @@ -47,7 +47,6 @@ def forward(self, y, x): else: model_class = aten_atan2() - ref_net = None return model_class, ref_net, "aten::atan2"
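
A note on the conversion logic in atan2.cpp above: the chain of Select nodes implements the standard case analysis that recovers atan2(y, x) from atan(y/x). The sketch below is an illustrative NumPy re-statement of those five branches, not part of the PR; the helper name and sample values are invented here, and torch.atan2 / np.arctan2 remain the actual reference the layer tests compare against. As in the converter, the pair x == 0, y == 0 is not covered by any branch and falls through to atan(0/0).

    import numpy as np

    def atan2_by_cases(y, x):
        # Hypothetical reference helper mirroring the five Select branches
        # built in translate_atan2; torch.atan2 / np.arctan2 are the ground truth.
        y = np.asarray(y, dtype=np.float64)
        x = np.asarray(x, dtype=np.float64)
        pi = np.arctan(1.0) * 4                      # same constant as const_pi
        with np.errstate(divide="ignore", invalid="ignore"):
            base = np.arctan(y / x)                  # condition 1: x > 0
        result = base
        result = np.where((x < 0) & (y >= 0), base + pi, result)   # condition 2: x < 0, y >= 0
        result = np.where((x < 0) & (y < 0),  base - pi, result)   # condition 3: x < 0, y < 0
        result = np.where((x == 0) & (y > 0),  pi / 2, result)     # condition 4: x = 0, y > 0
        result = np.where((x == 0) & (y < 0), -pi / 2, result)     # condition 5: x = 0, y < 0
        return result

    if __name__ == "__main__":
        # sample points away from (0, 0); that pair is not handled by any branch
        ys = np.array([0.0, 1.25, 1.0, -1.0, -1.0, 1.25, 1.9, 10.0])
        xs = np.array([1.5, -5.0, 10.0, -5.5, -5.0, -5.5, 2.9, 9.9])
        assert np.allclose(atan2_by_cases(ys, xs), np.arctan2(ys, xs))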
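
On the out-variant handling: context.mutate_input(2, result) reflects how the aten::atan2.out overload behaves in PyTorch, where the pre-allocated out tensor (input index 2 in the schema quoted in the converter) receives the result and is also the object the call returns. A minimal torch-only illustration, with made-up sample values:

    import torch

    y = torch.tensor([0.0, 1.25, -1.0])
    x = torch.tensor([1.5, -5.0, -5.5])

    out = torch.empty(3)              # pre-allocated, as in the aten_atan2_out test module
    ret = torch.atan2(y, x, out=out)  # writes into `out` and returns that same tensor
    assert ret is out
    assert torch.allclose(out, torch.atan2(y, x))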