From b206320926d88925cb7592f27d6c2256adbba078 Mon Sep 17 00:00:00 2001 From: megemini Date: Mon, 16 Oct 2023 18:39:37 +0800 Subject: [PATCH 1/8] [Init] add atleast api --- python/paddle/__init__.py | 3 ++ python/paddle/tensor/__init__.py | 3 ++ python/paddle/tensor/manipulation.py | 66 ++++++++++++++++++++++++++++ test/legacy_test/test_atleast.py | 55 +++++++++++++++++++++++ 4 files changed, 127 insertions(+) create mode 100644 test/legacy_test/test_atleast.py diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 5b3e806c3f947..15432adf8c7a5 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -198,6 +198,9 @@ from .tensor.manipulation import ( # noqa: F401 + atleast_1d, + atleast_2d, + atleast_3d, cast, cast_, concat, diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index 61005132276d9..b417257babf7e 100644 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -111,6 +111,9 @@ from .logic import isclose # noqa: F401 from .logic import equal_all # noqa: F401 from .logic import is_tensor # noqa: F401 +from .manupulation import atleast_1d # noqa: F401 +from .manupulation import atleast_2d # noqa: F401 +from .manupulation import atleast_3d # noqa: F401 from .manipulation import cast # noqa: F401 from .manipulation import cast_ # noqa: F401 from .manipulation import concat # noqa: F401 diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 15d9eb5300a5a..4bdca9dd95666 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3734,6 +3734,72 @@ def reshape_(x, shape, name=None): return out +def atleast_1d(*inputs, name=None): + """ + TODO + + """ + out = [] + for tensor in inputs: + tensor = paddle.to_tensor(tensor) + if tensor.dim() == 0: + result = tensor.reshape((1,)) + else: + result = tensor + out.append(result) + + if len(out) == 1: + return out[0] + else: + return out + + +def atleast_2d(*inputs, name=None): + """ + TODO + + """ + out = [] + for tensor in inputs: + tensor = paddle.to_tensor(tensor) + if tensor.dim() == 0: + result = tensor.reshape((1, 1)) + elif tensor.dim() == 1: + result = paddle.unsqueeze(tensor, axis=0) + else: + result = tensor + out.append(result) + + if len(out) == 1: + return out[0] + else: + return out + + +def atleast_3d(*inputs, name=None): + """ + TODO + + """ + out = [] + for tensor in inputs: + tensor = paddle.to_tensor(tensor) + if tensor.dim() == 0: + result = tensor.reshape((1, 1, 1)) + elif tensor.dim() == 1: + result = paddle.unsqueeze(tensor, axis=[0, 2]) + elif tensor.dim() == 2: + result = paddle.unsqueeze(tensor, axis=2) + else: + result = tensor + out.append(result) + + if len(out) == 1: + return out[0] + else: + return out + + def gather_nd(x, index, name=None): """ diff --git a/test/legacy_test/test_atleast.py b/test/legacy_test/test_atleast.py new file mode 100644 index 0000000000000..3bd42ccddc581 --- /dev/null +++ b/test/legacy_test/test_atleast.py @@ -0,0 +1,55 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +import paddle + + +def func_ref(func, *inputs): + return func(*inputs) + + +test_list = [ + (paddle.atleast_1d, np.atleast_1d), + (paddle.atleast_2d, np.atleast_2d), + (paddle.atleast_3d, np.atleast_3d), +] + +""" +- **编程范式场景** + 常规覆盖动态图和静态图的测试场景 + +- **硬件场景** + 常规需覆盖 CPU、GPU 两种测试场景 + +- **参数组合场景** + - 需要测试单个向量、多个向量、`(向量 ... 向量)`,等方式 + - 需要测试数字与向量混合的方式 + - 需要测试不同数据类型:float16, uint16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128, bfloat16 + +- **计算精度** + 需要保证前向计算的精度正确性,通过 numpy 实现的函数的对比结果 + +- **维度测试** + - Paddle API 支持的最低维度为 0 维,单测中应编写相应的 0 维尺寸测试 case + - 测试从 0 维至多维(`atleast_Nd` 中大于N) + +""" + + +if __name__ == '__main__': + unittest.main() From b64f027c8ac0c29c7819ee070d2fd75a17648b18 Mon Sep 17 00:00:00 2001 From: megemini Date: Mon, 23 Oct 2023 15:09:13 +0800 Subject: [PATCH 2/8] [Add] add atleast test --- python/paddle/tensor/manipulation.py | 127 +++++++- test/legacy_test/test_atleast.py | 456 ++++++++++++++++++++++++++- 2 files changed, 565 insertions(+), 18 deletions(-) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index c379f66513d51..a4ea11eb2ef07 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3888,8 +3888,49 @@ def reshape_(x, shape, name=None): def atleast_1d(*inputs, name=None): """ - TODO + Convert inputs to tensors and return the view with at least 1-dimension. Scalar inputs are converted, + one or high-dimensional inputs are preserved. + Args: + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Note: + ``int8``, ``uint8``, ``complex64``, ``complex128`` are not supported in static graph mode. + + Returns: + One Tensor, if there is only one input. + List of Tensors, if there are more than one inputs. + + Examples: + .. 
code-block:: python + + >>> import paddle + + >>> # one input + >>> x = paddle.to_tensor(123, dtype='int32') + >>> out = paddle.atleast_1d(x) + >>> print(out) + Tensor(shape=[1], dtype=int32, place=Place(cpu), stop_gradient=True, + [123]) + + >>> # more than one inputs + >>> x = paddle.to_tensor(123, dtype='int32') + >>> y = paddle.to_tensor([1.23], dtype='float32') + >>> out = paddle.atleast_1d(x, y) + >>> print(out) + [Tensor(shape=[1], dtype=int32, place=Place(cpu), stop_gradient=True, + [123]), Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, + [1.23000002])] + + >>> # more than 1-D input + >>> x = paddle.to_tensor(123, dtype='int32') + >>> y = paddle.to_tensor([[1.23]], dtype='float32') + >>> out = paddle.atleast_1d(x, y) + >>> print(out) + [Tensor(shape=[1], dtype=int32, place=Place(cpu), stop_gradient=True, + [123]), Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=True, + [[1.23000002]])] """ out = [] for tensor in inputs: @@ -3908,8 +3949,48 @@ def atleast_1d(*inputs, name=None): def atleast_2d(*inputs, name=None): """ - TODO + Convert inputs to tensors and return the view with at least 2-dimension. Two or high-dimensional inputs are preserved. + + Args: + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Note: + ``int8``, ``uint8``, ``complex64``, ``complex128`` are not supported in static graph mode. + + Returns: + One Tensor, if there is only one input. + List of Tensors, if there are more than one inputs. + + Examples: + .. code-block:: python + + >>> import paddle + + >>> # one input + >>> x = paddle.to_tensor(123, dtype='int32') + >>> out = paddle.atleast_2d(x) + >>> print(out) + Tensor(shape=[1, 1], dtype=int32, place=Place(cpu), stop_gradient=True, + [[123]]) + >>> # more than one inputs + >>> x = paddle.to_tensor(123, dtype='int32') + >>> y = paddle.to_tensor([1.23], dtype='float32') + >>> out = paddle.atleast_2d(x, y) + >>> print(out) + [Tensor(shape=[1, 1], dtype=int32, place=Place(cpu), stop_gradient=True, + [[123]]), Tensor(shape=[1, 1], dtype=float32, place=Place(cpu), stop_gradient=True, + [[1.23000002]])] + + >>> # more than 2-D input + >>> x = paddle.to_tensor(123, dtype='int32') + >>> y = paddle.to_tensor([[[1.23]]], dtype='float32') + >>> out = paddle.atleast_2d(x, y) + >>> print(out) + [Tensor(shape=[1, 1], dtype=int32, place=Place(cpu), stop_gradient=True, + [[123]]), Tensor(shape=[1, 1, 1], dtype=float32, place=Place(cpu), stop_gradient=True, + [[[1.23000002]]])] """ out = [] for tensor in inputs: @@ -3930,8 +4011,48 @@ def atleast_2d(*inputs, name=None): def atleast_3d(*inputs, name=None): """ - TODO + Convert inputs to tensors and return the view with at least 3-dimension. Three or high-dimensional inputs are preserved. + + Args: + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Note: + ``int8``, ``uint8``, ``complex64``, ``complex128`` are not supported in static graph mode. 
+ + Returns: + One Tensor, if there is only one input. + List of Tensors, if there are more than one inputs. + + Examples: + .. code-block:: python + >>> import paddle + + >>> # one input + >>> x = paddle.to_tensor(123, dtype='int32') + >>> out = paddle.atleast_3d(x) + >>> print(out) + Tensor(shape=[1, 1, 1], dtype=int32, place=Place(cpu), stop_gradient=True, + [[[123]]]) + + >>> # more than one inputs + >>> x = paddle.to_tensor(123, dtype='int32') + >>> y = paddle.to_tensor([1.23], dtype='float32') + >>> out = paddle.atleast_3d(x, y) + >>> print(out) + [Tensor(shape=[1, 1, 1], dtype=int32, place=Place(cpu), stop_gradient=True, + [[[123]]]), Tensor(shape=[1, 1, 1], dtype=float32, place=Place(cpu), stop_gradient=True, + [[[1.23000002]]])] + + >>> # more than 3-D input + >>> x = paddle.to_tensor(123, dtype='int32') + >>> y = paddle.to_tensor([[[[1.23]]]], dtype='float32') + >>> out = paddle.atleast_3d(x, y) + >>> print(out) + [Tensor(shape=[1, 1, 1], dtype=int32, place=Place(cpu), stop_gradient=True, + [[[123]]]), Tensor(shape=[1, 1, 1, 1], dtype=float32, place=Place(cpu), stop_gradient=True, + [[[[1.23000002]]]])] """ out = [] for tensor in inputs: diff --git a/test/legacy_test/test_atleast.py b/test/legacy_test/test_atleast.py index 3bd42ccddc581..5a5473d6a6ef9 100644 --- a/test/legacy_test/test_atleast.py +++ b/test/legacy_test/test_atleast.py @@ -15,11 +15,21 @@ import unittest import numpy as np +import parameterized as param import paddle +from paddle.base import core + +RTOL = 1e-5 +ATOL = 1e-8 +STATIC_NOT_SUPPORT = {'int8', 'uint8', 'complex64', 'complex128'} +PLACES = [paddle.CPUPlace()] + ( + [paddle.CUDAPlace(0)] if core.is_compiled_with_cuda() else [] +) def func_ref(func, *inputs): + """ref func, just for convenience""" return func(*inputs) @@ -29,26 +39,442 @@ def func_ref(func, *inputs): (paddle.atleast_3d, np.atleast_3d), ] -""" -- **编程范式场景** - 常规覆盖动态图和静态图的测试场景 -- **硬件场景** - 常规需覆盖 CPU、GPU 两种测试场景 +def generate_data(ndim, count=1, max_size=4, mix=False, dtype='int32'): + """generate test data + + Args: + ndim(int): dim of inputs + count(int): input count for each dim + max_size(int): max size for each dim + mix(bool): mix data types or not, like a data list [123, np.array(123), paddle.to_tensor(123), ...] + dtype(str): dtype + + Returns: + a list of data like: + [[data, dtype, shape, name], [data, dtype, shape, name] ... 
] + """ + + rtn = [] + for d in range(ndim): + data = [ + np.random.randint( + 0, + 255, + size=[np.random.randint(1, max_size) for _ in range(d)], + dtype=dtype, + ) + for _ in range(count) + ] + + if mix: + + def _mix_data(data, idx): + if idx % 3 == 0: + return data.tolist() + elif idx % 3 == 1: + return data + elif idx % 3 == 2: + return paddle.to_tensor(data) + + # mix normal/numpy/tensor + rtn.append( + list( + zip( + *[ + [ + _mix_data(_data, idx), + str(_data.dtype), + _data.shape, + '{}d_{}_{}'.format(d, idx, 'mix'), + ] + for idx, _data in enumerate(data) + ] + ) + ) + ) + + else: + # normal + rtn.append( + list( + zip( + *[ + [ + _data.tolist(), + str(_data.dtype), + _data.shape, + '{}d_{}_{}'.format(d, idx, 'normal'), + ] + for idx, _data in enumerate(data) + ] + ) + ) + ) + # numpy + rtn.append( + list( + zip( + *[ + [ + _data, + str(_data.dtype), + _data.shape, + '{}d_{}_{}'.format(d, idx, 'numpy'), + ] + for idx, _data in enumerate(data) + ] + ) + ) + ) + # tensor + rtn.append( + list( + zip( + *[ + [ + paddle.to_tensor(_data), + str(_data.dtype), + _data.shape, + '{}d_{}_{}'.format(d, idx, 'tensor'), + ] + for idx, _data in enumerate(data) + ] + ) + ) + ) + return rtn + + +class BaseTest(unittest.TestCase): + """Test in each `PLACES`, each `test_list`, and in `static/dygraph`""" + + def _test_static_api( + self, + inputs: list, + dtypes: list, + shapes: list, + names: list, + ): + """Test `static`, convert `Tensor` to `numpy array` before feed into graph""" + for place in PLACES: + paddle.enable_static() + for func, func_type in test_list: + with paddle.static.program_guard(paddle.static.Program()): + x = [] + feed = {} + for i in range(len(inputs)): + input = inputs[i] + shape = shapes[i] + dtype = dtypes[i] + name = names[i] + x.append(paddle.static.data(name, shape, dtype)) + # the data feeded should NOT be a Tensor + feed[name] = ( + input.numpy() + if isinstance(input, paddle.Tensor) + else input + ) + + out = func(*x) + exe = paddle.static.Executor(place) + res = exe.run(feed=feed, fetch_list=[out]) + + # unwrap inputs when lenght 1 + if len(inputs) == 1: + res = res[0] + + out_ref = func_ref( + func_type, + *[ + input.numpy() + if isinstance(input, paddle.Tensor) + else input + for input in inputs + ] + ) + + for n, p in zip(out_ref, res): + np.testing.assert_allclose(n, p, rtol=RTOL, atol=ATOL) + + def _test_dygraph_api( + self, + inputs: list, + dtypes: list, + shapes: list, + names: list, + ): + """Test `dygraph`, and check grads""" + for place in PLACES: + paddle.disable_static(place) + for func, func_type in test_list: + out = func(*inputs) + out_ref = func_ref( + func_type, + *[ + input.numpy() + if isinstance(input, paddle.Tensor) + else input + for input in inputs + ] + ) + + for n, p in zip(out_ref, out): + np.testing.assert_allclose( + n, p.numpy(), rtol=RTOL, atol=ATOL + ) + + # check grads + if len(inputs) == 1: + out = [out] + + for y in out: + y.stop_gradient = False + z = y * 123 + grads = paddle.grad(z, y) + self.assertTrue(len(grads), 1) + self.assertEqual(grads[0].dtype, y.dtype) + self.assertEqual(grads[0].shape, y.shape) + + +@param.parameterized_class( + ('inputs', 'dtypes', 'shapes', 'names'), + (generate_data(5, count=1, max_size=4, dtype='int32')), +) +class TestAtleastDim(BaseTest): + """test dim from 0 to 5""" + + def test_all(self): + self._test_dygraph_api( + self.inputs, self.dtypes, self.shapes, self.names + ) + self._test_static_api(self.inputs, self.dtypes, self.shapes, self.names) + + +@param.parameterized_class( + ('inputs', 
'dtypes', 'shapes', 'names'), + (generate_data(5, count=3, max_size=4, dtype='int32')), +) +class TestAtleastDimMoreInputs(BaseTest): + """test inputs of 3 tensors""" + + def test_all(self): + self._test_dygraph_api( + self.inputs, self.dtypes, self.shapes, self.names + ) + self._test_static_api(self.inputs, self.dtypes, self.shapes, self.names) + + +@param.parameterized_class( + ('inputs', 'dtypes', 'shapes', 'names'), + (generate_data(5, count=5, max_size=4, mix=True, dtype='int32')), +) +class TestAtleastMixData(BaseTest): + """test mix number/numpy/tensor""" + + def test_all(self): + self._test_dygraph_api( + self.inputs, self.dtypes, self.shapes, self.names + ) + self._test_static_api(self.inputs, self.dtypes, self.shapes, self.names) + + +@param.parameterized_class( + ('inputs', 'dtypes', 'shapes', 'names'), + ( + ( + ( + 123, + np.array([123], dtype='int32'), + paddle.to_tensor([[123]], dtype='int32'), + [[[123]]], + np.array([[[[123]]]], dtype='int32'), + paddle.to_tensor([[[[[123]]]]], dtype='int32'), + ), + ('int32', 'int32', 'int32', 'int32', 'int32', 'int32'), + ((), (1,), (1, 1), (1, 1, 1), (1, 1, 1, 1), (1, 1, 1, 1, 1)), + ( + '0_mixdim', + '1_mixdim', + '2_mixdim', + '3_mixdim', + '4_mixdim', + '5_mixdim', + ), + ), + ), +) +class TestAtleastMixDim(BaseTest): + """test mix dim""" + + def test_all(self): + self._test_dygraph_api( + self.inputs, self.dtypes, self.shapes, self.names + ) + self._test_static_api(self.inputs, self.dtypes, self.shapes, self.names) + + +@param.parameterized_class( + ('inputs', 'dtypes', 'shapes', 'names'), + ( + ( + ( + paddle.to_tensor(0.1, dtype='float16'), + paddle.to_tensor(1, dtype='uint16'), + paddle.to_tensor(0.1, dtype='float32'), + paddle.to_tensor(0.1, dtype='float64'), + paddle.to_tensor(1, dtype='int8'), + paddle.to_tensor(1, dtype='int16'), + paddle.to_tensor(1, dtype='int32'), + paddle.to_tensor(1, dtype='int64'), + paddle.to_tensor(1, dtype='uint8'), + paddle.to_tensor(1 + 1j, dtype='complex64'), + paddle.to_tensor(1 + 1j, dtype='complex128'), + paddle.to_tensor(0.1, dtype='bfloat16'), + ), + ( + 'float16', + 'uint16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + 'complex64', + 'complex128', + 'bfloat16', + ), + ( + (), + (), + (), + (), + (), + (), + (), + (), + (), + (), + (), + (), + ), + ( + '0_mixdtype', + '1_mixdtype', + '2_mixdtype', + '3_mixdtype', + '4_mixdtype', + '5_mixdtype', + '6_mixdtype', + '7_mixdtype', + '8_mixdtype', + '9_mixdtype', + '10_mixdtype', + '11_mixdtype', + ), + ), + ), +) +class TestAtleastMixDtypes(BaseTest): + """test mix dtypes""" + + def test_all(self): + self._test_dygraph_api( + self.inputs, self.dtypes, self.shapes, self.names + ) + + inputs = [] + dtypes = [] + shapes = [] + names = [] + for i in range(len(self.inputs)): + dtype = self.dtypes[i] + if dtype not in STATIC_NOT_SUPPORT: + inputs.append(self.inputs[i]) + dtypes.append(dtype) + shapes.append(self.shapes[i]) + names.append(self.names[i]) + + self._test_static_api(inputs, dtypes, shapes, names) + + +@param.parameterized_class( + ('inputs', 'dtypes', 'shapes', 'names'), + ( + ( + (paddle.to_tensor(1, dtype='int8'),), + ('int8',), + ((),), + ('0_errordtype',), + ), + ( + (paddle.to_tensor(1, dtype='uint8'),), + ('uint8',), + ((),), + ('1_errordtype',), + ), + ( + (paddle.to_tensor(1 + 1j, dtype='complex64'),), + ('complex64',), + ((),), + ('2_errordtype',), + ), + ( + (paddle.to_tensor(1 + 1j, dtype='complex128'),), + ('complex128',), + ((),), + ('3_errordtype',), + ), + ), +) +class 
TestAtleastErrorDtypes(BaseTest): + """test wrong dtypes in `static`""" + + def test_all(self): + with self.assertRaises(TypeError): + self._test_static_api( + self.inputs, self.dtypes, self.shapes, self.names + ) -- **参数组合场景** - - 需要测试单个向量、多个向量、`(向量 ... 向量)`,等方式 - - 需要测试数字与向量混合的方式 - - 需要测试不同数据类型:float16, uint16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128, bfloat16 -- **计算精度** - 需要保证前向计算的精度正确性,通过 numpy 实现的函数的对比结果 +@param.parameterized_class( + ('inputs', 'dtypes', 'shapes', 'names'), + ( + (((123, [123]),), ('int32',), ((),), ('0_combine',)), + ( + ((np.array([123], dtype='int32'), [[123]]),), + ('int32',), + ((),), + ('1_combine',), + ), + ( + ( + ( + np.array([[123]], dtype='int32'), + paddle.to_tensor([[[123]]], dtype='int32'), + ), + ), + ('int32',), + ((),), + ('2_combine',), + ), + ), +) +class TestAtleastErrorCombineInputs(BaseTest): + """test combine inputs, like: `at_leastNd((x, y))`, where paddle treats like numpy, not pytorch""" -- **维度测试** - - Paddle API 支持的最低维度为 0 维,单测中应编写相应的 0 维尺寸测试 case - - 测试从 0 维至多维(`atleast_Nd` 中大于N) + def test_all(self): + with self.assertRaises(ValueError): + self._test_dygraph_api( + self.inputs, self.dtypes, self.shapes, self.names + ) -""" + with self.assertRaises(ValueError): + self._test_static_api( + self.inputs, self.dtypes, self.shapes, self.names + ) if __name__ == '__main__': From 23959773b11ebd85bc21b07d4e55b40e537d8dca Mon Sep 17 00:00:00 2001 From: megemini Date: Mon, 23 Oct 2023 17:42:46 +0800 Subject: [PATCH 3/8] [Fix] import atleast --- python/paddle/__init__.py | 3 +++ python/paddle/tensor/__init__.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 289726eae23dc..6d8e6d330f26e 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -830,6 +830,9 @@ 'logspace', 'reshape', 'reshape_', + 'atleast_1d', + 'atleast_2d', + 'atleast_3d', 'reverse', 'nonzero', 'CUDAPinnedPlace', diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index e6c13efc7b64e..69e19a8caff42 100644 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -111,9 +111,9 @@ from .logic import isclose # noqa: F401 from .logic import equal_all # noqa: F401 from .logic import is_tensor # noqa: F401 -from .manupulation import atleast_1d # noqa: F401 -from .manupulation import atleast_2d # noqa: F401 -from .manupulation import atleast_3d # noqa: F401 +from .manipulation import atleast_1d # noqa: F401 +from .manipulation import atleast_2d # noqa: F401 +from .manipulation import atleast_3d # noqa: F401 from .manipulation import cast # noqa: F401 from .manipulation import cast_ # noqa: F401 from .manipulation import concat # noqa: F401 From 4c8d828b7d5308e38afc75f31e3651a9a03a346d Mon Sep 17 00:00:00 2001 From: megemini Date: Sat, 4 Nov 2023 17:23:49 +0800 Subject: [PATCH 4/8] [Change] test_atleast.py to test_atleast_nd.py and add bool data type test --- python/paddle/tensor/manipulation.py | 6 +++--- test/legacy_test/{test_atleast.py => test_atleast_nd.py} | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) rename test/legacy_test/{test_atleast.py => test_atleast_nd.py} (99%) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 93a39d2a00d63..7d485d4b10817 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3892,7 +3892,7 @@ def atleast_1d(*inputs, name=None): one or high-dimensional inputs are preserved. 
Args: - inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``bool``, ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Note: @@ -3952,7 +3952,7 @@ def atleast_2d(*inputs, name=None): Convert inputs to tensors and return the view with at least 2-dimension. Two or high-dimensional inputs are preserved. Args: - inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``bool``, ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Note: @@ -4014,7 +4014,7 @@ def atleast_3d(*inputs, name=None): Convert inputs to tensors and return the view with at least 3-dimension. Three or high-dimensional inputs are preserved. Args: - inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``bool``, ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. 
Note: diff --git a/test/legacy_test/test_atleast.py b/test/legacy_test/test_atleast_nd.py similarity index 99% rename from test/legacy_test/test_atleast.py rename to test/legacy_test/test_atleast_nd.py index 5a5473d6a6ef9..201fb18363e73 100644 --- a/test/legacy_test/test_atleast.py +++ b/test/legacy_test/test_atleast_nd.py @@ -319,6 +319,7 @@ def test_all(self): ( ( ( + paddle.to_tensor(True, dtype='bool'), paddle.to_tensor(0.1, dtype='float16'), paddle.to_tensor(1, dtype='uint16'), paddle.to_tensor(0.1, dtype='float32'), @@ -333,6 +334,7 @@ def test_all(self): paddle.to_tensor(0.1, dtype='bfloat16'), ), ( + 'bool', 'float16', 'uint16', 'float32', @@ -359,6 +361,7 @@ def test_all(self): (), (), (), + (), ), ( '0_mixdtype', @@ -373,6 +376,7 @@ def test_all(self): '9_mixdtype', '10_mixdtype', '11_mixdtype', + '12_mixdtype', ), ), ), From f2c82cd74f5a3e4bfd5d68dc4ee1ba468eb13cc7 Mon Sep 17 00:00:00 2001 From: megemini Date: Fri, 10 Nov 2023 19:19:39 +0800 Subject: [PATCH 5/8] [Update] update dtype supports and unittest --- python/paddle/tensor/manipulation.py | 15 +++------------ test/legacy_test/test_atleast_nd.py | 21 ++------------------- 2 files changed, 5 insertions(+), 31 deletions(-) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index f5c515f8826fb..450f7300d18d1 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -3961,12 +3961,9 @@ def atleast_1d(*inputs, name=None): one or high-dimensional inputs are preserved. Args: - inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``bool``, ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``float32``, ``float64``, ``int16``, ``int32``, ``int64``, ``int8``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16`` or ``bool``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - Note: - ``int8``, ``uint8``, ``complex64``, ``complex128`` are not supported in static graph mode. - Returns: One Tensor, if there is only one input. List of Tensors, if there are more than one inputs. @@ -4021,12 +4018,9 @@ def atleast_2d(*inputs, name=None): Convert inputs to tensors and return the view with at least 2-dimension. Two or high-dimensional inputs are preserved. Args: - inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``bool``, ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``float32``, ``float64``, ``int16``, ``int32``, ``int64``, ``int8``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16`` or ``bool``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - Note: - ``int8``, ``uint8``, ``complex64``, ``complex128`` are not supported in static graph mode. - Returns: One Tensor, if there is only one input. List of Tensors, if there are more than one inputs. @@ -4083,12 +4077,9 @@ def atleast_3d(*inputs, name=None): Convert inputs to tensors and return the view with at least 3-dimension. Three or high-dimensional inputs are preserved. Args: - inputs (Tensor|list(Tensor)): One or more tensors. 
The data type is ``bool``, ``float16``, ``uint16``, ``float32``, ``float64``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16``. + inputs (Tensor|list(Tensor)): One or more tensors. The data type is ``float16``, ``float32``, ``float64``, ``int16``, ``int32``, ``int64``, ``int8``, ``uint8``, ``complex64``, ``complex128``, ``bfloat16`` or ``bool``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - Note: - ``int8``, ``uint8``, ``complex64``, ``complex128`` are not supported in static graph mode. - Returns: One Tensor, if there is only one input. List of Tensors, if there are more than one inputs. diff --git a/test/legacy_test/test_atleast_nd.py b/test/legacy_test/test_atleast_nd.py index 201fb18363e73..1f256b6620eb9 100644 --- a/test/legacy_test/test_atleast_nd.py +++ b/test/legacy_test/test_atleast_nd.py @@ -22,7 +22,7 @@ RTOL = 1e-5 ATOL = 1e-8 -STATIC_NOT_SUPPORT = {'int8', 'uint8', 'complex64', 'complex128'} + PLACES = [paddle.CPUPlace()] + ( [paddle.CUDAPlace(0)] if core.is_compiled_with_cuda() else [] ) @@ -321,7 +321,6 @@ def test_all(self): ( paddle.to_tensor(True, dtype='bool'), paddle.to_tensor(0.1, dtype='float16'), - paddle.to_tensor(1, dtype='uint16'), paddle.to_tensor(0.1, dtype='float32'), paddle.to_tensor(0.1, dtype='float64'), paddle.to_tensor(1, dtype='int8'), @@ -336,7 +335,6 @@ def test_all(self): ( 'bool', 'float16', - 'uint16', 'float32', 'float64', 'int8', @@ -361,7 +359,6 @@ def test_all(self): (), (), (), - (), ), ( '0_mixdtype', @@ -376,7 +373,6 @@ def test_all(self): '9_mixdtype', '10_mixdtype', '11_mixdtype', - '12_mixdtype', ), ), ), @@ -388,20 +384,7 @@ def test_all(self): self._test_dygraph_api( self.inputs, self.dtypes, self.shapes, self.names ) - - inputs = [] - dtypes = [] - shapes = [] - names = [] - for i in range(len(self.inputs)): - dtype = self.dtypes[i] - if dtype not in STATIC_NOT_SUPPORT: - inputs.append(self.inputs[i]) - dtypes.append(dtype) - shapes.append(self.shapes[i]) - names.append(self.names[i]) - - self._test_static_api(inputs, dtypes, shapes, names) + self._test_static_api(self.inputs, self.dtypes, self.shapes, self.names) @param.parameterized_class( From 309ab650f1d45097ac3c48d052ea5c6dc6b9fc67 Mon Sep 17 00:00:00 2001 From: megemini Date: Fri, 10 Nov 2023 19:24:07 +0800 Subject: [PATCH 6/8] [Fix] dtype error unittest --- test/legacy_test/test_atleast_nd.py | 39 ----------------------------- 1 file changed, 39 deletions(-) diff --git a/test/legacy_test/test_atleast_nd.py b/test/legacy_test/test_atleast_nd.py index 1f256b6620eb9..2ba8b43fa0fa6 100644 --- a/test/legacy_test/test_atleast_nd.py +++ b/test/legacy_test/test_atleast_nd.py @@ -387,45 +387,6 @@ def test_all(self): self._test_static_api(self.inputs, self.dtypes, self.shapes, self.names) -@param.parameterized_class( - ('inputs', 'dtypes', 'shapes', 'names'), - ( - ( - (paddle.to_tensor(1, dtype='int8'),), - ('int8',), - ((),), - ('0_errordtype',), - ), - ( - (paddle.to_tensor(1, dtype='uint8'),), - ('uint8',), - ((),), - ('1_errordtype',), - ), - ( - (paddle.to_tensor(1 + 1j, dtype='complex64'),), - ('complex64',), - ((),), - ('2_errordtype',), - ), - ( - (paddle.to_tensor(1 + 1j, dtype='complex128'),), - ('complex128',), - ((),), - ('3_errordtype',), - ), - ), -) -class TestAtleastErrorDtypes(BaseTest): - """test wrong dtypes in `static`""" - - def test_all(self): - with self.assertRaises(TypeError): - self._test_static_api( - self.inputs, 
self.dtypes, self.shapes, self.names - ) - - @param.parameterized_class( ('inputs', 'dtypes', 'shapes', 'names'), ( From d336793c357995b82a9b013e85bde63ca0ec630a Mon Sep 17 00:00:00 2001 From: megemini Date: Fri, 10 Nov 2023 19:26:34 +0800 Subject: [PATCH 7/8] [Change] static test with test_with_pir_api --- test/legacy_test/test_atleast_nd.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/legacy_test/test_atleast_nd.py b/test/legacy_test/test_atleast_nd.py index 2ba8b43fa0fa6..d22149672fcdc 100644 --- a/test/legacy_test/test_atleast_nd.py +++ b/test/legacy_test/test_atleast_nd.py @@ -19,6 +19,7 @@ import paddle from paddle.base import core +from paddle.pir_utils import test_with_pir_api RTOL = 1e-5 ATOL = 1e-8 @@ -149,6 +150,7 @@ def _mix_data(data, idx): class BaseTest(unittest.TestCase): """Test in each `PLACES`, each `test_list`, and in `static/dygraph`""" + @test_with_pir_api def _test_static_api( self, inputs: list, From dc0900730df09a3b18dc19bf20b01467fa14cd6e Mon Sep 17 00:00:00 2001 From: megemini Date: Tue, 14 Nov 2023 17:31:38 +0800 Subject: [PATCH 8/8] [Add] atleast_Nd as tensor method --- python/paddle/tensor/__init__.py | 3 +++ test/legacy_test/test_atleast_nd.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index 2bfc7183343c2..a1557bb4585d8 100644 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -734,6 +734,9 @@ 'normal_', 'index_fill', 'index_fill_', + 'atleast_1d', + 'atleast_2d', + 'atleast_3d', ] # this list used in math_op_patch.py for magic_method bind diff --git a/test/legacy_test/test_atleast_nd.py b/test/legacy_test/test_atleast_nd.py index d22149672fcdc..781534dde25ce 100644 --- a/test/legacy_test/test_atleast_nd.py +++ b/test/legacy_test/test_atleast_nd.py @@ -413,7 +413,7 @@ def test_all(self): ), ) class TestAtleastErrorCombineInputs(BaseTest): - """test combine inputs, like: `at_leastNd((x, y))`, where paddle treats like numpy, not pytorch""" + """test combine inputs, like: `at_leastNd((x, y))`, where paddle treats like numpy""" def test_all(self): with self.assertRaises(ValueError): @@ -427,5 +427,32 @@ def test_all(self): ) +class TestAtleastAsTensorMethod(unittest.TestCase): + def test_as_tensor_method(self): + input = 123 + tensor = paddle.to_tensor(input) + + for place in PLACES: + paddle.disable_static(place) + + out = tensor.atleast_1d() + out_ref = np.atleast_1d(input) + + for n, p in zip(out_ref, out): + np.testing.assert_allclose(n, p.numpy(), rtol=RTOL, atol=ATOL) + + out = tensor.atleast_2d() + out_ref = np.atleast_2d(input) + + for n, p in zip(out_ref, out): + np.testing.assert_allclose(n, p.numpy(), rtol=RTOL, atol=ATOL) + + out = tensor.atleast_3d() + out_ref = np.atleast_3d(input) + + for n, p in zip(out_ref, out): + np.testing.assert_allclose(n, p.numpy(), rtol=RTOL, atol=ATOL) + + if __name__ == '__main__': unittest.main()
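
For a quick local sanity check of the promotion rule the series implements (reshape a 0-D input, otherwise unsqueeze leading/trailing axes up to the requested rank, following NumPy semantics rather than PyTorch's, as the combine-inputs test notes), the standalone sketch below compares each new API against its NumPy counterpart for ranks 0 through 3. This snippet is illustrative only and separate from the test suite in the patches; it assumes the series is applied and runs in dynamic graph mode, and the `samples`/`pairs` names are local to the sketch.

    import numpy as np
    import paddle

    # One representative input per rank: 0-D scalar, 1-D, 2-D and 3-D.
    samples = [
        np.array(1.0, dtype='float32'),
        np.ones((2,), dtype='float32'),
        np.ones((2, 3), dtype='float32'),
        np.ones((2, 3, 4), dtype='float32'),
    ]

    pairs = [
        (paddle.atleast_1d, np.atleast_1d),
        (paddle.atleast_2d, np.atleast_2d),
        (paddle.atleast_3d, np.atleast_3d),
    ]

    for paddle_fn, numpy_fn in pairs:
        for sample in samples:
            expected = numpy_fn(sample)                # NumPy reference result
            got = paddle_fn(paddle.to_tensor(sample))  # single input -> single Tensor
            # shapes and values should agree with NumPy's promotion rule
            assert tuple(got.shape) == expected.shape, (
                paddle_fn.__name__,
                sample.shape,
                tuple(got.shape),
                expected.shape,
            )
            np.testing.assert_allclose(got.numpy(), expected)

    print('promotion rules match numpy for ranks 0-3')

With more than one input, each API returns a list of promoted tensors instead of a single Tensor, matching the docstring examples added in patch 2.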