From 6445c77782fb78af167bcf28d1d1686b878fe60c Mon Sep 17 00:00:00 2001
From: Ryan <44900829+DrRyanHuang@users.noreply.github.com>
Date: Wed, 8 Nov 2023 10:33:59 +0800
Subject: [PATCH] [PIR api adaptor No.242, 228] Migrate
 unique_consecutive/moveaxis into pir (#58688)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 paddle/phi/api/yaml/ops.yaml                        | 2 +-
 paddle/phi/infermeta/unary.cc                       | 2 +-
 paddle/phi/infermeta/unary.h                        | 2 +-
 paddle/phi/kernels/cpu/unique_consecutive_kernel.cc | 9 ++++-----
 paddle/phi/kernels/gpu/unique_consecutive_kernel.cu | 9 ++++-----
 paddle/phi/kernels/unique_consecutive_kernel.h      | 2 +-
 python/paddle/tensor/manipulation.py                | 2 +-
 test/legacy_test/test_transpose_op.py               | 1 +
 test/legacy_test/test_unique_consecutive_op.py      | 9 +++++----
 9 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index f22f5be8ec0284..5bf57114402ee9 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -2683,7 +2683,7 @@
   backward: uniform_inplace_grad
 
 - op : unique_consecutive
-  args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, int dtype = 5)
+  args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, DataType dtype = DataType::FLOAT32)
   output : Tensor(out), Tensor(index), Tensor(counts)
   infer_meta :
     func : UniqueConsecutiveInferMeta
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 0308093ed9fc67..ca470efc9b2a79 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -4852,7 +4852,7 @@ void UniqueConsecutiveInferMeta(const MetaTensor& x,
                                 bool return_inverse,
                                 bool return_counts,
                                 const std::vector<int>& axis,
-                                int dtype,
+                                DataType dtype,
                                 MetaTensor* out,
                                 MetaTensor* index,
                                 MetaTensor* counts) {
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index 70cfefa2a1daa6..c88a12d34506dd 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -716,7 +716,7 @@ void UniqueConsecutiveInferMeta(const MetaTensor& x,
                                 bool return_inverse,
                                 bool return_counts,
                                 const std::vector<int>& axis,
-                                int dtype,
+                                DataType dtype,
                                 MetaTensor* out,
                                 MetaTensor* index,
                                 MetaTensor* counts);
diff --git a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
index d0d674d06ee2bd..8c3a14a5edf76e 100644
--- a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
+++ b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
@@ -30,12 +30,11 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              bool return_inverse,
                              bool return_counts,
                              const std::vector<int>& axis,
-                             int dtype,
+                             DataType dtype,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts) {
-  auto data_type = phi::TransToPhiDataType(dtype);
-  if (data_type == phi::DataType::INT32) {
+  if (dtype == phi::DataType::INT32) {
     PADDLE_ENFORCE_LE(
         x.numel(),
         INT_MAX,
@@ -48,14 +47,14 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
 
   if (axis.empty()) {
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveFlattenedTensorFunctor<Context, T>(
             dev_ctx, x, out, return_inverse, return_counts, index, counts));
   } else {
     int valid_axis = axis[0];
     if (valid_axis < 0) valid_axis += x.dims().size();
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveDimFunctor<Context, T>(dev_ctx,
                                                 x,
                                                 out,
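Note: the changes above replace the legacy integer attribute (int dtype = 5, where 5 is the old proto VarType enum value for FP32) with a typed DataType whose default, DataType::FLOAT32, is equivalent; the kernels can then hand dtype straight to phi::VisitDataTypeTiny and drop the TransToPhiDataType round-trip. For orientation, the user-visible contract this attribute serves is that dtype selects the integer type of the inverse/counts outputs. A minimal sketch of that behavior (illustrative only, not part of the patch):

import paddle

x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
out, inverse, counts = paddle.unique_consecutive(
    x, return_inverse=True, return_counts=True, dtype="int64"
)
# out:     [1, 2, 3, 1, 2]            consecutive duplicates collapsed
# inverse: [0, 0, 1, 1, 2, 3, 3, 4]   int64, as requested via dtype
# counts:  [2, 2, 1, 2, 1]            int64, as requested via dtype
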
diff --git a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
index 448e6ca38b3f50..9c32bff0ccb809 100644
--- a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
+++ b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
@@ -29,12 +29,11 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              bool return_inverse,
                              bool return_counts,
                              const std::vector<int>& axis,
-                             int dtype,
+                             DataType dtype,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts) {
-  auto data_type = phi::TransToPhiDataType(dtype);
-  if (data_type == phi::DataType::INT32) {
+  if (dtype == phi::DataType::INT32) {
     PADDLE_ENFORCE_LE(
         x.numel() + 1,
         INT_MAX,
@@ -48,7 +47,7 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
   // if 'axis' is not required, flatten the Tensor.
   if (axis.empty()) {
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveFlattenedCUDAFunctor<Context, T>(
             dev_ctx, x, out, return_inverse, return_counts, index, counts));
   } else {
@@ -56,7 +55,7 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
     int valid_axis = axis[0];
     if (valid_axis < 0) valid_axis += x.dims().size();
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveDimsCUDAFunctor<Context, T>(dev_ctx,
                                                      x,
                                                      out,
diff --git a/paddle/phi/kernels/unique_consecutive_kernel.h b/paddle/phi/kernels/unique_consecutive_kernel.h
index ade35d4d49730e..6c88f5947fc388 100644
--- a/paddle/phi/kernels/unique_consecutive_kernel.h
+++ b/paddle/phi/kernels/unique_consecutive_kernel.h
@@ -26,7 +26,7 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              bool return_inverse,
                              bool return_counts,
                              const std::vector<int>& axis,
-                             int dtype,
+                             DataType dtype,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts);
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 5f052be2942345..0cfe560fdc0687 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -2482,7 +2482,7 @@ def unique_consecutive(
     else:
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         out, inverse, counts = _C_ops.unique_consecutive(
             x, return_inverse, return_counts, axis, attr_dtype
         )
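Note: switching the guard to in_dynamic_or_pir_mode() means the _C_ops.unique_consecutive fast path is taken not only in dygraph but also while a PIR static program is being built, instead of falling through to the legacy append_op branch. A rough sketch of a static-graph build that now exercises the PIR path (names and shapes here are illustrative, not from the patch):

import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data("x", shape=[8], dtype="float32")
    # With in_dynamic_or_pir_mode(), this call is lowered directly through
    # _C_ops.unique_consecutive when a PIR program is being traced.
    out, inverse, counts = paddle.unique_consecutive(
        x, return_inverse=True, return_counts=True
    )
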
diff --git a/test/legacy_test/test_transpose_op.py b/test/legacy_test/test_transpose_op.py
index 98774942ce65d6..4752c8c26bd331 100644
--- a/test/legacy_test/test_transpose_op.py
+++ b/test/legacy_test/test_transpose_op.py
@@ -710,6 +710,7 @@ def test_moveaxis3(self):
         self.assertEqual(out.shape, [2, 3])
         paddle.enable_static()
 
+    @test_with_pir_api
     def test_error(self):
         x = paddle.randn([2, 3, 4, 5])
         # src must have the same number with dst
diff --git a/test/legacy_test/test_unique_consecutive_op.py b/test/legacy_test/test_unique_consecutive_op.py
index 36fd33490d18c6..72ef3aa79b4a81 100644
--- a/test/legacy_test/test_unique_consecutive_op.py
+++ b/test/legacy_test/test_unique_consecutive_op.py
@@ -20,6 +20,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 def reference_unique_consecutive(
@@ -203,6 +204,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.places.append(base.CUDAPlace(0))
 
+    @test_with_pir_api
     def check_static_result(self, place):
         with base.program_guard(base.Program(), base.Program()):
             paddle.enable_static()
@@ -217,7 +219,6 @@ def check_static_result(self, place):
             x_np = np.random.randint(20, size=100).astype("float32")
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input_x": x_np},
                 fetch_list=[result],
             )
@@ -240,6 +241,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.places.append(base.CUDAPlace(0))
 
+    @test_with_pir_api
     def check_static_result(self, place):
         with base.program_guard(base.Program(), base.Program()):
             paddle.enable_static()
@@ -256,7 +258,6 @@ def check_static_result(self, place):
             x_np = np.random.randint(20, size=100).astype("float32")
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input_x": x_np},
                 fetch_list=[result],
             )
@@ -281,6 +282,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.places.append(base.CUDAPlace(0))
 
+    @test_with_pir_api
     def check_static_result(self, place):
         with base.program_guard(base.Program(), base.Program()):
             paddle.enable_static()
@@ -297,7 +299,6 @@ def check_static_result(self, place):
             x_np = np.random.randint(20, size=100).astype("float32")
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input_x": x_np},
                 fetch_list=[result],
             )
@@ -347,7 +348,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
 
 if __name__ == "__main__":
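Note: the test changes follow two PIR conventions: @test_with_pir_api reruns the decorated static-graph body under both the legacy program and the new PIR program, and removing the explicit base.default_main_program() argument lets Executor.run fall back to whichever default main program the active mode provides. A condensed illustration of the pattern; the input_x/result construction is elided from the hunks above, so that part of the body is a hypothetical reconstruction, not a copy:

import unittest

import numpy as np

import paddle
from paddle import base
from paddle.pir_utils import test_with_pir_api


class TestUniqueConsecutiveStatic(unittest.TestCase):
    @test_with_pir_api
    def test_static(self):
        paddle.enable_static()
        with base.program_guard(base.Program(), base.Program()):
            # Hypothetical stand-in for the elided graph-building lines.
            input_x = paddle.static.data(
                "input_x", shape=[100], dtype="float32"
            )
            result = paddle.unique_consecutive(input_x)
            exe = base.Executor(base.CPUPlace())
            x_np = np.random.randint(20, size=100).astype("float32")
            # No explicit program argument: run() resolves the default main
            # program of the current mode (legacy or PIR).
            fetches = exe.run(feed={"input_x": x_np}, fetch_list=[result])


if __name__ == "__main__":
    unittest.main()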