diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index e5cb39978f730..6865d7ecb4c1f 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -2681,7 +2681,7 @@
   backward : uniform_inplace_grad
 
 - op : unique_consecutive
-  args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, int dtype = 5)
+  args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, DataType dtype = DataType::FLOAT32)
   output : Tensor(out), Tensor(index), Tensor(counts)
   infer_meta :
     func : UniqueConsecutiveInferMeta
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 8873a617ef303..d14b622ea1e5c 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -4852,7 +4852,7 @@ void UniqueConsecutiveInferMeta(const MetaTensor& x,
                                 bool return_inverse,
                                 bool return_counts,
                                 const std::vector<int>& axis,
-                                int dtype,
+                                DataType dtype,
                                 MetaTensor* out,
                                 MetaTensor* index,
                                 MetaTensor* counts) {
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index 8a28d454e42f7..4d69df6760e15 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -716,7 +716,7 @@ void UniqueConsecutiveInferMeta(const MetaTensor& x,
                                 bool return_inverse,
                                 bool return_counts,
                                 const std::vector<int>& axis,
-                                int dtype,
+                                DataType dtype,
                                 MetaTensor* out,
                                 MetaTensor* index,
                                 MetaTensor* counts);
diff --git a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
index d0d674d06ee2b..8c3a14a5edf76 100644
--- a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
+++ b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
@@ -30,12 +30,11 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              bool return_inverse,
                              bool return_counts,
                              const std::vector<int>& axis,
-                             int dtype,
+                             DataType dtype,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts) {
-  auto data_type = phi::TransToPhiDataType(dtype);
-  if (data_type == phi::DataType::INT32) {
+  if (dtype == phi::DataType::INT32) {
     PADDLE_ENFORCE_LE(
         x.numel(),
         INT_MAX,
@@ -48,14 +47,14 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
 
   if (axis.empty()) {
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveFlattenedTensorFunctor<Context, T>(
             dev_ctx, x, out, return_inverse, return_counts, index, counts));
   } else {
     int valid_axis = axis[0];
     if (valid_axis < 0) valid_axis += x.dims().size();
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveDimFunctor<Context, T>(dev_ctx,
                                                 x,
                                                 out,
diff --git a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
index 448e6ca38b3f5..9c32bff0ccb80 100644
--- a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
+++ b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
@@ -29,12 +29,11 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              bool return_inverse,
                              bool return_counts,
                              const std::vector<int>& axis,
-                             int dtype,
+                             DataType dtype,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts) {
-  auto data_type = phi::TransToPhiDataType(dtype);
-  if (data_type == phi::DataType::INT32) {
+  if (dtype == phi::DataType::INT32) {
     PADDLE_ENFORCE_LE(
         x.numel() + 1,
         INT_MAX,
@@ -48,7 +47,7 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
   // if 'axis' is not required, flatten the Tensor.
   if (axis.empty()) {
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveFlattenedCUDAFunctor<Context, T>(
             dev_ctx, x, out, return_inverse, return_counts, index, counts));
   } else {
@@ -56,7 +55,7 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
     int valid_axis = axis[0];
     if (valid_axis < 0) valid_axis += x.dims().size();
     phi::VisitDataTypeTiny(
-        data_type,
+        dtype,
         UniqueConsecutiveDimsCUDAFunctor<Context, T>(dev_ctx,
                                                      x,
                                                      out,
diff --git a/paddle/phi/kernels/unique_consecutive_kernel.h b/paddle/phi/kernels/unique_consecutive_kernel.h
index ade35d4d49730..6c88f5947fc38 100644
--- a/paddle/phi/kernels/unique_consecutive_kernel.h
+++ b/paddle/phi/kernels/unique_consecutive_kernel.h
@@ -26,7 +26,7 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              bool return_inverse,
                              bool return_counts,
                              const std::vector<int>& axis,
-                             int dtype,
+                             DataType dtype,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts);
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 40856399238ae..6a0432c95581d 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -2481,7 +2481,7 @@ def unique_consecutive(
     else:
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         out, inverse, counts = _C_ops.unique_consecutive(
             x, return_inverse, return_counts, axis, attr_dtype
         )
diff --git a/test/legacy_test/test_transpose_op.py b/test/legacy_test/test_transpose_op.py
index 98774942ce65d..4752c8c26bd33 100644
--- a/test/legacy_test/test_transpose_op.py
+++ b/test/legacy_test/test_transpose_op.py
@@ -710,6 +710,7 @@ def test_moveaxis3(self):
         self.assertEqual(out.shape, [2, 3])
         paddle.enable_static()
 
+    @test_with_pir_api
     def test_error(self):
         x = paddle.randn([2, 3, 4, 5])
         # src must have the same number with dst
diff --git a/test/legacy_test/test_unique_consecutive_op.py b/test/legacy_test/test_unique_consecutive_op.py
index 36fd33490d18c..72ef3aa79b4a8 100644
--- a/test/legacy_test/test_unique_consecutive_op.py
+++ b/test/legacy_test/test_unique_consecutive_op.py
@@ -20,6 +20,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 def reference_unique_consecutive(
@@ -203,6 +204,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.places.append(base.CUDAPlace(0))
 
+    @test_with_pir_api
     def check_static_result(self, place):
         with base.program_guard(base.Program(), base.Program()):
             paddle.enable_static()
@@ -217,7 +219,6 @@ def check_static_result(self, place):
             x_np = np.random.randint(20, size=100).astype("float32")
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input_x": x_np},
                 fetch_list=[result],
             )
@@ -240,6 +241,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.places.append(base.CUDAPlace(0))
 
+    @test_with_pir_api
     def check_static_result(self, place):
         with base.program_guard(base.Program(), base.Program()):
             paddle.enable_static()
@@ -256,7 +258,6 @@ def check_static_result(self, place):
             x_np = np.random.randint(20, size=100).astype("float32")
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input_x": x_np},
                 fetch_list=[result],
             )
@@ -281,6 +282,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.places.append(base.CUDAPlace(0))
 
+    @test_with_pir_api
     def check_static_result(self, place):
         with base.program_guard(base.Program(), base.Program()):
             paddle.enable_static()
@@ -297,7 +299,6 @@ def check_static_result(self, place):
             x_np = np.random.randint(20, size=100).astype("float32")
             exe = base.Executor(place)
             fetches = exe.run(
-                base.default_main_program(),
                 feed={"input_x": x_np},
                 fetch_list=[result],
            )
@@ -347,7 +348,7 @@ def setUp(self):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
 
 if __name__ == "__main__":
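
For context, a minimal sketch of the user-facing behaviour the reworked `dtype` attribute feeds into. The sample tensor and the expected outputs are illustrative, based on the documented `paddle.unique_consecutive` semantics (not verified against this branch):

```python
import paddle

# Illustrative input; any 1-D tensor works.
x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])

# `dtype` selects the integer type of the returned index/counts tensors.
# After this change it reaches the kernel as a DataType attribute rather
# than a raw int that needed TransToPhiDataType.
out, inverse, counts = paddle.unique_consecutive(
    x, return_inverse=True, return_counts=True, dtype="int64"
)

print(out.numpy())      # expected: [1 2 3 1 2]
print(inverse.numpy())  # expected: [0 0 1 1 2 3 3 4]
print(counts.numpy())   # expected: [2 2 1 2 1]
```

Note that the Python signature is unchanged: `convert_np_dtype_to_dtype_` still handles the conversion, so existing callers need no changes; only the attribute's C++ type and the dynamic/PIR dispatch path differ.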