From cb23d0670fa7c403883eb7b0058d6c9d4cc7ea46 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Fri, 3 Dec 2021 03:55:47 +0000 Subject: [PATCH 1/5] add paddle.gcd --- python/paddle/__init__.py | 1 + .../paddle/fluid/tests/unittests/test_gcd.py | 93 +++++++++++++++++++ python/paddle/tensor/__init__.py | 1 + python/paddle/tensor/math.py | 77 +++++++++++++++ 4 files changed, 172 insertions(+) create mode 100644 python/paddle/fluid/tests/unittests/test_gcd.py diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index c37c331bae4a6e..0dd56a243b3d43 100755 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -225,6 +225,7 @@ from .tensor.math import lgamma # noqa: F401 from .tensor.math import rad2deg # noqa: F401 from .tensor.math import deg2rad # noqa: F401 +from .tensor.math import gcd # noqa: F401 from .tensor.math import diff # noqa: F401 from .tensor.math import angle # noqa: F401 diff --git a/python/paddle/fluid/tests/unittests/test_gcd.py b/python/paddle/fluid/tests/unittests/test_gcd.py new file mode 100644 index 00000000000000..616db3e8701b0b --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_gcd.py @@ -0,0 +1,93 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import unittest +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid import Program, program_guard +from op_test import OpTest + +paddle.enable_static() + + +class TestGcdAPI(unittest.TestCase): + def setUp(self): + self.x1_np = 12 + self.x2_np = 20 + self.x1_shape = [1] + self.x2_shape = [1] + + def test_static_graph(self): + startup_program = fluid.Program() + train_program = fluid.Program() + with fluid.program_guard(startup_program, train_program): + x1 = fluid.data(name='input1', dtype='int64', shape=self.x1_shape) + x2 = fluid.data(name='input2', dtype='int64', shape=self.x2_shape) + out = paddle.gcd(x1, x2) + + place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + exe = fluid.Executor(place) + res = exe.run(fluid.default_main_program(), + feed={'input1': self.x1_np, + 'input2': self.x2_np}, + fetch_list=[out]) + self.assertTrue((np.array(res[0]) == np.gcd(self.x1_np, self.x2_np) + ).all()) + + def test_dygraph(self): + paddle.disable_static() + x1 = paddle.to_tensor(self.x1_np) + x2 = paddle.to_tensor(self.x2_np) + result = paddle.gcd(x1, x2) + self.assertEqual( + np.allclose(np.gcd(self.x1_np, self.x2_np), result.numpy()), True) + + paddle.enable_static() + + +class TestGcdAPI2(TestGcdAPI): + def setUp(self): + self.x1_np = np.arange(6) + self.x2_np = 20 + self.x1_shape = [6] + self.x2_shape = [1] + + +class TestGcdAPI3(TestGcdAPI): + def setUp(self): + self.x1_np = 0 + self.x2_np = 20 + self.x1_shape = [1] + self.x2_shape = [1] + + +class TestGcdAPI4(TestGcdAPI): + def setUp(self): + self.x1_np = 0 + self.x2_np = 0 + self.x1_shape = [1] + self.x2_shape = [1] + + +class TestGcdAPI5(TestGcdAPI): + def setUp(self): + self.x1_np = 12 + self.x2_np = -20 + self.x1_shape = [1] + self.x2_shape = [1] diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index 7cc2c7623a9ff6..c31d40bc325216 100755 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -191,6 +191,7 @@ from .math import diagonal # noqa: F401 from .math import rad2deg # noqa: F401 from .math import deg2rad # noqa: F401 +from .math import gcd # noqa: F401 from .math import diff # noqa: F401 from .math import angle # noqa: F401 diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 36d61fa08546bf..b59a984a260a44 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -2725,6 +2725,83 @@ def deg2rad(x, name=None): type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale}) return out +def gcd(x1, x2, name=None): + """ + Computes the element-wise greatest common divisor (GCD) of input |x1| and |x2|. + Both x1 and x2 must have integer types. + + Note: + gcd(0,0)=0, gcd(0, x2)=|x2| + + Args: + x1, x2 (Tensor): An N-D Tensor, the data type is int8,int16,int32,int64,uint8. + If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Returns: + out (Tensor): An N-D Tensor, the data type is the same with input. + + Examples: + .. 
code-block:: python + + import paddle + import numpy as np + + x1 = paddle.to_tensor(12) + x2 = paddle.to_tensor(20) + paddle.gcd(x1, x2) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [4]) + + x3 = paddle.to_tensor(np.arange(6)) + paddle.gcd(x3, x2) + # Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [20, 1 , 2 , 1 , 4 , 5]) + + x4 = paddle.to_tensor(0) + paddle.gcd(x4, x2) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [20]) + + paddle.gcd(x4, x4) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [0]) + + x5 = paddle.to_tensor(-20) + paddle.gcd(x1, x5) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [4]) + """ + shape = paddle.broadcast_shape(x1.shape, x2.shape) + x1 = paddle.broadcast_to(x1, shape) + x2 = paddle.broadcast_to(x2, shape) + x1 = paddle.abs(x1) + x2 = paddle.abs(x2) + + def _gcd_cond_fn(x1, x2): + return paddle.any(x2 != 0) + + def _gcd_body_fn(x1, x2): + # paddle.mod will raise an error when any element of x2 is 0. To avoid + # that, we change those zeros to ones. Their values don't matter because + # they won't be used. + x2_safe = paddle.where(x2 != 0, x2, paddle.ones(x2.shape, x2.dtype)) + x1, x2 = (paddle.where(x2 != 0, x2, x1), + paddle.where(x2 != 0, paddle.mod(x1, x2_safe),paddle.zeros(x2.shape, x2.dtype))) + return (paddle.where(x1 < x2, x2, x1), paddle.where(x1 < x2, x1, x2)) + + if in_dygraph_mode(): + while _gcd_cond_fn(x1, x2): + x1, x2 = _gcd_body_fn(x1, x2) + + return x1 + else: + check_variable_and_dtype(x1, 'x1', ['int32', 'int64', 'int8', 'int16', 'uint8'], 'gcd') + check_variable_and_dtype(x2, 'x2', ['int32', 'int64', 'int8', 'int16', 'uint8'], 'gcd') + out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x1, x2]) + return out + + def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): r""" Computes the n-th forward difference along the given axis. From 9406ca39ae468f7c16611811f7177f49e48a1733 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Fri, 3 Dec 2021 08:05:04 +0000 Subject: [PATCH 2/5] add paddle.lcm --- python/paddle/__init__.py | 3 + .../paddle/fluid/tests/unittests/test_lcm.py | 93 +++++++++++++++++++ python/paddle/tensor/__init__.py | 1 + python/paddle/tensor/math.py | 62 ++++++++++++- 4 files changed, 156 insertions(+), 3 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_lcm.py diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 0dd56a243b3d43..6c29e6e3c18d81 100755 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -226,6 +226,7 @@ from .tensor.math import rad2deg # noqa: F401 from .tensor.math import deg2rad # noqa: F401 from .tensor.math import gcd # noqa: F401 +from .tensor.math import lcm # noqa: F401 from .tensor.math import diff # noqa: F401 from .tensor.math import angle # noqa: F401 @@ -464,6 +465,8 @@ 'atan2', 'rad2deg', 'deg2rad', + 'gcd', + 'lcm', 'expand', 'broadcast_to', 'ones_like', diff --git a/python/paddle/fluid/tests/unittests/test_lcm.py b/python/paddle/fluid/tests/unittests/test_lcm.py new file mode 100644 index 00000000000000..4f59c60895ca7e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_lcm.py @@ -0,0 +1,93 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid import Program, program_guard +from op_test import OpTest + +paddle.enable_static() + + +class TestLcmAPI(unittest.TestCase): + def setUp(self): + self.x1_np = 12 + self.x2_np = 20 + self.x1_shape = [1] + self.x2_shape = [1] + + def test_static_graph(self): + startup_program = fluid.Program() + train_program = fluid.Program() + with fluid.program_guard(startup_program, train_program): + x1 = fluid.data(name='input1', dtype='int64', shape=self.x1_shape) + x2 = fluid.data(name='input2', dtype='int64', shape=self.x2_shape) + out = paddle.lcm(x1, x2) + + place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + exe = fluid.Executor(place) + res = exe.run(fluid.default_main_program(), + feed={'input1': self.x1_np, + 'input2': self.x2_np}, + fetch_list=[out]) + self.assertTrue((np.array(res[0]) == np.lcm(self.x1_np, self.x2_np) + ).all()) + + def test_dygraph(self): + paddle.disable_static() + x1 = paddle.to_tensor(self.x1_np) + x2 = paddle.to_tensor(self.x2_np) + result = paddle.lcm(x1, x2) + self.assertEqual( + np.allclose(np.lcm(self.x1_np, self.x2_np), result.numpy()), True) + + paddle.enable_static() + + +class TestLcmAPI2(TestLcmAPI): + def setUp(self): + self.x1_np = np.arange(6) + self.x2_np = 20 + self.x1_shape = [6] + self.x2_shape = [1] + + +class TestLcmAPI3(TestLcmAPI): + def setUp(self): + self.x1_np = 0 + self.x2_np = 20 + self.x1_shape = [1] + self.x2_shape = [1] + + +class TestLcmAPI4(TestLcmAPI): + def setUp(self): + self.x1_np = 0 + self.x2_np = 0 + self.x1_shape = [1] + self.x2_shape = [1] + + +class TestLcmAPI5(TestLcmAPI): + def setUp(self): + self.x1_np = 12 + self.x2_np = -20 + self.x1_shape = [1] + self.x2_shape = [1] diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index c31d40bc325216..21b6bc9d578643 100755 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -192,6 +192,7 @@ from .math import rad2deg # noqa: F401 from .math import deg2rad # noqa: F401 from .math import gcd # noqa: F401 +from .math import lcm # noqa: F401 from .math import diff # noqa: F401 from .math import angle # noqa: F401 diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index b59a984a260a44..b7e528d3c32b6f 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -2785,9 +2785,10 @@ def _gcd_body_fn(x1, x2): # paddle.mod will raise an error when any element of x2 is 0. To avoid # that, we change those zeros to ones. Their values don't matter because # they won't be used. 
- x2_safe = paddle.where(x2 != 0, x2, paddle.ones(x2.shape, x2.dtype)) - x1, x2 = (paddle.where(x2 != 0, x2, x1), - paddle.where(x2 != 0, paddle.mod(x1, x2_safe),paddle.zeros(x2.shape, x2.dtype))) + x2_not_equal_0 = (x2 != 0) + x2_safe = paddle.where(x2_not_equal_0, x2, paddle.ones(x2.shape, x2.dtype)) + x1, x2 = (paddle.where(x2_not_equal_0, x2, x1), + paddle.where(x2_not_equal_0, paddle.mod(x1, x2_safe),paddle.zeros(x2.shape, x2.dtype))) return (paddle.where(x1 < x2, x2, x1), paddle.where(x1 < x2, x1, x2)) if in_dygraph_mode(): @@ -2801,6 +2802,61 @@ def _gcd_body_fn(x1, x2): out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x1, x2]) return out +def lcm(x1, x2, name=None): + """ + Computes the element-wise least common multiple (LCM) of input |x1| and |x2|. + Both x1 and x2 must have integer types. + + Note: + lcm(0,0)=0, lcm(0, x2)=0 + + Args: + x1, x2 (Tensor): An N-D Tensor, the data type is int8,int16,int32,int64,uint8. + If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Returns: + out (Tensor): An N-D Tensor, the data type is the same with input. + + Examples: + .. code-block:: python + + import paddle + import numpy as np + + x1 = paddle.to_tensor(12) + x2 = paddle.to_tensor(20) + paddle.lcm(x1, x2) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [60]) + + x3 = paddle.to_tensor(np.arange(6)) + paddle.lcm(x3, x2) + # Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [0, 20, 20, 60, 20, 20]) + + x4 = paddle.to_tensor(0) + paddle.lcm(x4, x2) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [0]) + + paddle.lcm(x4, x4) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [0]) + + x5 = paddle.to_tensor(-20) + paddle.lcm(x1, x5) + # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, + # [60]) + """ + d = paddle.gcd(x1, x2) + # paddle.mod will raise an error when any element of x2 is 0. To avoid + # that, we change those zeros to ones. Their values don't matter because + # they won't be used. 
+ d_equal_0 = paddle.equal(d, 0) + d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d) + out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x1 * x2) // d_safe) + return out def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): r""" From 79b7ff50c107721027d22cfdc75c848d164a5519 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Fri, 3 Dec 2021 08:55:08 +0000 Subject: [PATCH 3/5] add int32 unittest --- python/paddle/fluid/tests/unittests/test_gcd.py | 8 ++++---- python/paddle/fluid/tests/unittests/test_lcm.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_gcd.py b/python/paddle/fluid/tests/unittests/test_gcd.py index 616db3e8701b0b..8545f81e2145d3 100644 --- a/python/paddle/fluid/tests/unittests/test_gcd.py +++ b/python/paddle/fluid/tests/unittests/test_gcd.py @@ -36,8 +36,8 @@ def test_static_graph(self): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x1 = fluid.data(name='input1', dtype='int64', shape=self.x1_shape) - x2 = fluid.data(name='input2', dtype='int64', shape=self.x2_shape) + x1 = fluid.data(name='input1', dtype='int32', shape=self.x1_shape) + x2 = fluid.data(name='input2', dtype='int32', shape=self.x2_shape) out = paddle.gcd(x1, x2) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( @@ -63,8 +63,8 @@ def test_dygraph(self): class TestGcdAPI2(TestGcdAPI): def setUp(self): - self.x1_np = np.arange(6) - self.x2_np = 20 + self.x1_np = np.arange(6).astype(np.int32) + self.x2_np = np.array([20]).astype(np.int32) self.x1_shape = [6] self.x2_shape = [1] diff --git a/python/paddle/fluid/tests/unittests/test_lcm.py b/python/paddle/fluid/tests/unittests/test_lcm.py index 4f59c60895ca7e..e04dceca4ab5a7 100644 --- a/python/paddle/fluid/tests/unittests/test_lcm.py +++ b/python/paddle/fluid/tests/unittests/test_lcm.py @@ -36,8 +36,8 @@ def test_static_graph(self): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x1 = fluid.data(name='input1', dtype='int64', shape=self.x1_shape) - x2 = fluid.data(name='input2', dtype='int64', shape=self.x2_shape) + x1 = fluid.data(name='input1', dtype='int32', shape=self.x1_shape) + x2 = fluid.data(name='input2', dtype='int32', shape=self.x2_shape) out = paddle.lcm(x1, x2) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( @@ -63,8 +63,8 @@ def test_dygraph(self): class TestLcmAPI2(TestLcmAPI): def setUp(self): - self.x1_np = np.arange(6) - self.x2_np = 20 + self.x1_np = np.arange(6).astype(np.int32) + self.x2_np = np.array([20]).astype(np.int32) self.x1_shape = [6] self.x2_shape = [1] From a4dbe52c4761b03bc28a5fc9f7964d13ab6a4b07 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Mon, 6 Dec 2021 03:46:04 +0000 Subject: [PATCH 4/5] adjust api params name --- .../paddle/fluid/tests/unittests/test_gcd.py | 60 ++++++++-------- .../paddle/fluid/tests/unittests/test_lcm.py | 56 +++++++-------- python/paddle/tensor/math.py | 70 +++++++++---------- 3 files changed, 93 insertions(+), 93 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_gcd.py b/python/paddle/fluid/tests/unittests/test_gcd.py index 8545f81e2145d3..820216dc56cd60 100644 --- a/python/paddle/fluid/tests/unittests/test_gcd.py +++ b/python/paddle/fluid/tests/unittests/test_gcd.py @@ -27,67 +27,67 @@ class TestGcdAPI(unittest.TestCase): def setUp(self): - self.x1_np = 12 - self.x2_np = 20 - self.x1_shape = [1] - 
self.x2_shape = [1] + self.x_np = 12 + self.y_np = 20 + self.x_shape = [1] + self.y_shape = [1] def test_static_graph(self): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x1 = fluid.data(name='input1', dtype='int32', shape=self.x1_shape) - x2 = fluid.data(name='input2', dtype='int32', shape=self.x2_shape) - out = paddle.gcd(x1, x2) + x = fluid.data(name='input1', dtype='int32', shape=self.x_shape) + y = fluid.data(name='input2', dtype='int32', shape=self.y_shape) + out = paddle.gcd(x, y) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) res = exe.run(fluid.default_main_program(), - feed={'input1': self.x1_np, - 'input2': self.x2_np}, + feed={'input1': self.x_np, + 'input2': self.y_np}, fetch_list=[out]) - self.assertTrue((np.array(res[0]) == np.gcd(self.x1_np, self.x2_np) + self.assertTrue((np.array(res[0]) == np.gcd(self.x_np, self.y_np) ).all()) def test_dygraph(self): paddle.disable_static() - x1 = paddle.to_tensor(self.x1_np) - x2 = paddle.to_tensor(self.x2_np) - result = paddle.gcd(x1, x2) + x = paddle.to_tensor(self.x_np) + y = paddle.to_tensor(self.y_np) + result = paddle.gcd(x, y) self.assertEqual( - np.allclose(np.gcd(self.x1_np, self.x2_np), result.numpy()), True) + np.allclose(np.gcd(self.x_np, self.y_np), result.numpy()), True) paddle.enable_static() class TestGcdAPI2(TestGcdAPI): def setUp(self): - self.x1_np = np.arange(6).astype(np.int32) - self.x2_np = np.array([20]).astype(np.int32) - self.x1_shape = [6] - self.x2_shape = [1] + self.x_np = np.arange(6).astype(np.int32) + self.y_np = np.array([20]).astype(np.int32) + self.x_shape = [6] + self.y_shape = [1] class TestGcdAPI3(TestGcdAPI): def setUp(self): - self.x1_np = 0 - self.x2_np = 20 - self.x1_shape = [1] - self.x2_shape = [1] + self.x_np = 0 + self.y_np = 20 + self.x_shape = [1] + self.y_shape = [1] class TestGcdAPI4(TestGcdAPI): def setUp(self): - self.x1_np = 0 - self.x2_np = 0 - self.x1_shape = [1] - self.x2_shape = [1] + self.x_np = 0 + self.y_np = 0 + self.x_shape = [1] + self.y_shape = [1] class TestGcdAPI5(TestGcdAPI): def setUp(self): - self.x1_np = 12 - self.x2_np = -20 - self.x1_shape = [1] - self.x2_shape = [1] + self.x_np = 12 + self.y_np = -20 + self.x_shape = [1] + self.y_shape = [1] diff --git a/python/paddle/fluid/tests/unittests/test_lcm.py b/python/paddle/fluid/tests/unittests/test_lcm.py index e04dceca4ab5a7..123c3e3d444e1b 100644 --- a/python/paddle/fluid/tests/unittests/test_lcm.py +++ b/python/paddle/fluid/tests/unittests/test_lcm.py @@ -27,67 +27,67 @@ class TestLcmAPI(unittest.TestCase): def setUp(self): - self.x1_np = 12 - self.x2_np = 20 - self.x1_shape = [1] - self.x2_shape = [1] + self.x_np = 12 + self.y_np = 20 + self.x_shape = [1] + self.y_shape = [1] def test_static_graph(self): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x1 = fluid.data(name='input1', dtype='int32', shape=self.x1_shape) - x2 = fluid.data(name='input2', dtype='int32', shape=self.x2_shape) + x1 = fluid.data(name='input1', dtype='int32', shape=self.x_shape) + x2 = fluid.data(name='input2', dtype='int32', shape=self.y_shape) out = paddle.lcm(x1, x2) place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() exe = fluid.Executor(place) res = exe.run(fluid.default_main_program(), - feed={'input1': self.x1_np, - 'input2': self.x2_np}, + feed={'input1': self.x_np, + 'input2': self.y_np}, 
fetch_list=[out]) - self.assertTrue((np.array(res[0]) == np.lcm(self.x1_np, self.x2_np) + self.assertTrue((np.array(res[0]) == np.lcm(self.x_np, self.y_np) ).all()) def test_dygraph(self): paddle.disable_static() - x1 = paddle.to_tensor(self.x1_np) - x2 = paddle.to_tensor(self.x2_np) + x1 = paddle.to_tensor(self.x_np) + x2 = paddle.to_tensor(self.y_np) result = paddle.lcm(x1, x2) self.assertEqual( - np.allclose(np.lcm(self.x1_np, self.x2_np), result.numpy()), True) + np.allclose(np.lcm(self.x_np, self.y_np), result.numpy()), True) paddle.enable_static() class TestLcmAPI2(TestLcmAPI): def setUp(self): - self.x1_np = np.arange(6).astype(np.int32) - self.x2_np = np.array([20]).astype(np.int32) - self.x1_shape = [6] - self.x2_shape = [1] + self.x_np = np.arange(6).astype(np.int32) + self.y_np = np.array([20]).astype(np.int32) + self.x_shape = [6] + self.y_shape = [1] class TestLcmAPI3(TestLcmAPI): def setUp(self): - self.x1_np = 0 - self.x2_np = 20 - self.x1_shape = [1] - self.x2_shape = [1] + self.x_np = 0 + self.y_np = 20 + self.x_shape = [1] + self.y_shape = [1] class TestLcmAPI4(TestLcmAPI): def setUp(self): - self.x1_np = 0 - self.x2_np = 0 - self.x1_shape = [1] - self.x2_shape = [1] + self.x_np = 0 + self.y_np = 0 + self.x_shape = [1] + self.y_shape = [1] class TestLcmAPI5(TestLcmAPI): def setUp(self): - self.x1_np = 12 - self.x2_np = -20 - self.x1_shape = [1] - self.x2_shape = [1] + self.x_np = 12 + self.y_np = -20 + self.x_shape = [1] + self.y_shape = [1] diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index b7e528d3c32b6f..ebabf112cbdfaa 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -2725,17 +2725,17 @@ def deg2rad(x, name=None): type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale}) return out -def gcd(x1, x2, name=None): +def gcd(x, y, name=None): """ - Computes the element-wise greatest common divisor (GCD) of input |x1| and |x2|. - Both x1 and x2 must have integer types. + Computes the element-wise greatest common divisor (GCD) of input |x| and |y|. + Both x and y must have integer types. Note: - gcd(0,0)=0, gcd(0, x2)=|x2| + gcd(0,0)=0, gcd(0, y)=|y| Args: - x1, x2 (Tensor): An N-D Tensor, the data type is int8,int16,int32,int64,uint8. - If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). + x, y (Tensor): An N-D Tensor, the data type is int8,int16,int32,int64,uint8. + If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: @@ -2772,47 +2772,47 @@ def gcd(x1, x2, name=None): # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, # [4]) """ - shape = paddle.broadcast_shape(x1.shape, x2.shape) - x1 = paddle.broadcast_to(x1, shape) - x2 = paddle.broadcast_to(x2, shape) - x1 = paddle.abs(x1) - x2 = paddle.abs(x2) + shape = paddle.broadcast_shape(x.shape, y.shape) + x = paddle.broadcast_to(x, shape) + y = paddle.broadcast_to(y, shape) + x = paddle.abs(x) + y = paddle.abs(y) - def _gcd_cond_fn(x1, x2): - return paddle.any(x2 != 0) + def _gcd_cond_fn(x, y): + return paddle.any(y != 0) - def _gcd_body_fn(x1, x2): - # paddle.mod will raise an error when any element of x2 is 0. To avoid + def _gcd_body_fn(x, y): + # paddle.mod will raise an error when any element of y is 0. To avoid # that, we change those zeros to ones. 
Their values don't matter because # they won't be used. - x2_not_equal_0 = (x2 != 0) - x2_safe = paddle.where(x2_not_equal_0, x2, paddle.ones(x2.shape, x2.dtype)) - x1, x2 = (paddle.where(x2_not_equal_0, x2, x1), - paddle.where(x2_not_equal_0, paddle.mod(x1, x2_safe),paddle.zeros(x2.shape, x2.dtype))) - return (paddle.where(x1 < x2, x2, x1), paddle.where(x1 < x2, x1, x2)) + y_not_equal_0 = (y != 0) + y_safe = paddle.where(y_not_equal_0, y, paddle.ones(y.shape, y.dtype)) + x, y = (paddle.where(y_not_equal_0, y, x), + paddle.where(y_not_equal_0, paddle.mod(x, y_safe),paddle.zeros(y.shape, y.dtype))) + return (paddle.where(x < y, y, x), paddle.where(x < y, x, y)) if in_dygraph_mode(): - while _gcd_cond_fn(x1, x2): - x1, x2 = _gcd_body_fn(x1, x2) + while _gcd_cond_fn(x, y): + x, y = _gcd_body_fn(x, y) - return x1 + return x else: - check_variable_and_dtype(x1, 'x1', ['int32', 'int64', 'int8', 'int16', 'uint8'], 'gcd') - check_variable_and_dtype(x2, 'x2', ['int32', 'int64', 'int8', 'int16', 'uint8'], 'gcd') - out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x1, x2]) + check_variable_and_dtype(x, 'x', ['int32', 'int64', 'int8', 'int16', 'uint8'], 'gcd') + check_variable_and_dtype(y, 'y', ['int32', 'int64', 'int8', 'int16', 'uint8'], 'gcd') + out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x, y]) return out -def lcm(x1, x2, name=None): +def lcm(x, y, name=None): """ - Computes the element-wise least common multiple (LCM) of input |x1| and |x2|. - Both x1 and x2 must have integer types. + Computes the element-wise least common multiple (LCM) of input |x| and |y|. + Both x and y must have integer types. Note: - lcm(0,0)=0, lcm(0, x2)=0 + lcm(0,0)=0, lcm(0, y)=0 Args: - x1, x2 (Tensor): An N-D Tensor, the data type is int8,int16,int32,int64,uint8. - If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). + x, y (Tensor): An N-D Tensor, the data type is int8,int16,int32,int64,uint8. + If x.shape != y.shape, they must be broadcastable to a common shape (which becomes the shape of the output). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: @@ -2849,13 +2849,13 @@ def lcm(x1, x2, name=None): # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True, # [60]) """ - d = paddle.gcd(x1, x2) - # paddle.mod will raise an error when any element of x2 is 0. To avoid + d = paddle.gcd(x, y) + # paddle.mod will raise an error when any element of y is 0. To avoid # that, we change those zeros to ones. Their values don't matter because # they won't be used. 
d_equal_0 = paddle.equal(d, 0) d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d) - out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x1 * x2) // d_safe) + out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x * y) // d_safe) return out def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): From 1ed6b26b5b0781c84d2929383db16e7135e69e69 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Tue, 7 Dec 2021 06:26:21 +0000 Subject: [PATCH 5/5] add tensor_method_func --- python/paddle/tensor/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index 21b6bc9d578643..9b609887131ba1 100755 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -407,6 +407,10 @@ 'multi_dot', 'solve', 'triangular_solve', + 'rad2deg', + 'deg2rad', + 'gcd', + 'lcm', 'diff', 'angle', ]
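
Implementation note (illustrative, not part of the patch series): paddle.gcd runs an element-wise Euclidean algorithm as a loop over paddle.where masks, swapping zero divisors for ones before each paddle.mod so the kernel never divides by zero, and paddle.lcm is then derived as |x * y| // gcd(x, y) with the gcd(0, 0) lanes forced back to 0. A minimal NumPy sketch of the same logic follows; the helper names are hypothetical and chosen only for this example.

    import numpy as np

    def gcd_sketch(x, y):
        # Broadcast to a common shape and work on absolute values, as the patch does.
        x, y = np.broadcast_arrays(np.abs(x), np.abs(y))
        while np.any(y != 0):
            y_nonzero = y != 0
            # Replace zeros before the modulo so x % y never divides by zero; those
            # lanes are overwritten with 0 by the where() on the next line anyway.
            y_safe = np.where(y_nonzero, y, 1)
            x, y = (np.where(y_nonzero, y, x),
                    np.where(y_nonzero, x % y_safe, 0))
            # Order the pair so x holds the larger value, mirroring the patch.
            x, y = np.where(x < y, y, x), np.where(x < y, x, y)
        return x

    def lcm_sketch(x, y):
        d = gcd_sketch(x, y)
        # d is 0 only when both inputs are 0; substitute 1 so the division is safe,
        # then force that lane back to 0, since lcm(0, 0) is defined as 0.
        d_safe = np.where(d == 0, 1, d)
        return np.where(d == 0, 0, np.abs(x * y) // d_safe)

    print(gcd_sketch(np.arange(6), 20))   # [20  1  2  1  4  5]
    print(lcm_sketch(np.arange(6), 20))   # [ 0 20 20 60 20 20]

Run on the docstring inputs, the sketch reproduces the outputs documented in the gcd and lcm examples above.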