Skip to content

Commit

Permalink
【PIR api adaptor No.242、228】 Migrate unique_consecutive/moveaxis into pir
Browse files Browse the repository at this point in the history
  • Loading branch information
DrRyanHuang authored and SecretXV committed Nov 28, 2023
1 parent 0ec982e commit 6445c77
Show file tree
Hide file tree
Showing 9 changed files with 19 additions and 19 deletions.
2 changes: 1 addition & 1 deletion paddle/phi/api/yaml/ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2683,7 +2683,7 @@
backward: uniform_inplace_grad

- op : unique_consecutive
args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, int dtype = 5)
args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, DataType dtype = DataType::FLOAT32)
output : Tensor(out), Tensor(index), Tensor(counts)
infer_meta :
func : UniqueConsecutiveInferMeta
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -4852,7 +4852,7 @@ void UniqueConsecutiveInferMeta(const MetaTensor& x,
bool return_inverse,
bool return_counts,
const std::vector<int>& axis,
int dtype,
DataType dtype,
MetaTensor* out,
MetaTensor* index,
MetaTensor* counts) {
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/unary.h
Original file line number Diff line number Diff line change
Expand Up @@ -716,7 +716,7 @@ void UniqueConsecutiveInferMeta(const MetaTensor& x,
bool return_inverse,
bool return_counts,
const std::vector<int>& axis,
int dtype,
DataType dtype,
MetaTensor* out,
MetaTensor* index,
MetaTensor* counts);
Expand Down
9 changes: 4 additions & 5 deletions paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,11 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
bool return_inverse,
bool return_counts,
const std::vector<int>& axis,
int dtype,
DataType dtype,
DenseTensor* out,
DenseTensor* index,
DenseTensor* counts) {
auto data_type = phi::TransToPhiDataType(dtype);
if (data_type == phi::DataType::INT32) {
if (dtype == phi::DataType::INT32) {
PADDLE_ENFORCE_LE(
x.numel(),
INT_MAX,
Expand All @@ -48,14 +47,14 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,

if (axis.empty()) {
phi::VisitDataTypeTiny(
data_type,
dtype,
UniqueConsecutiveFlattenedTensorFunctor<Context, T>(
dev_ctx, x, out, return_inverse, return_counts, index, counts));
} else {
int valid_axis = axis[0];
if (valid_axis < 0) valid_axis += x.dims().size();
phi::VisitDataTypeTiny(
data_type,
dtype,
UniqueConsecutiveDimFunctor<Context, T>(dev_ctx,
x,
out,
Expand Down
9 changes: 4 additions & 5 deletions paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,11 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
bool return_inverse,
bool return_counts,
const std::vector<int>& axis,
int dtype,
DataType dtype,
DenseTensor* out,
DenseTensor* index,
DenseTensor* counts) {
auto data_type = phi::TransToPhiDataType(dtype);
if (data_type == phi::DataType::INT32) {
if (dtype == phi::DataType::INT32) {
PADDLE_ENFORCE_LE(
x.numel() + 1,
INT_MAX,
Expand All @@ -48,15 +47,15 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
// if 'axis' is not required, flatten the Tensor.
if (axis.empty()) {
phi::VisitDataTypeTiny(
data_type,
dtype,
UniqueConsecutiveFlattenedCUDAFunctor<Context, T>(
dev_ctx, x, out, return_inverse, return_counts, index, counts));
} else {
// 'axis' is required.
int valid_axis = axis[0];
if (valid_axis < 0) valid_axis += x.dims().size();
phi::VisitDataTypeTiny(
data_type,
dtype,
UniqueConsecutiveDimsCUDAFunctor<Context, T>(dev_ctx,
x,
out,
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/unique_consecutive_kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
bool return_inverse,
bool return_counts,
const std::vector<int>& axis,
int dtype,
DataType dtype,
DenseTensor* out,
DenseTensor* index,
DenseTensor* counts);
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/tensor/manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -2482,7 +2482,7 @@ def unique_consecutive(
else:
axis = [axis]
attr_dtype = convert_np_dtype_to_dtype_(dtype)
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
out, inverse, counts = _C_ops.unique_consecutive(
x, return_inverse, return_counts, axis, attr_dtype
)
Expand Down
1 change: 1 addition & 0 deletions test/legacy_test/test_transpose_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -710,6 +710,7 @@ def test_moveaxis3(self):
self.assertEqual(out.shape, [2, 3])
paddle.enable_static()

@test_with_pir_api
def test_error(self):
x = paddle.randn([2, 3, 4, 5])
# src must have the same number with dst
Expand Down
9 changes: 5 additions & 4 deletions test/legacy_test/test_unique_consecutive_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
import paddle
from paddle import base
from paddle.base import core
from paddle.pir_utils import test_with_pir_api


def reference_unique_consecutive(
Expand Down Expand Up @@ -203,6 +204,7 @@ def setUp(self):
if core.is_compiled_with_cuda():
self.places.append(base.CUDAPlace(0))

@test_with_pir_api
def check_static_result(self, place):
with base.program_guard(base.Program(), base.Program()):
paddle.enable_static()
Expand All @@ -217,7 +219,6 @@ def check_static_result(self, place):
x_np = np.random.randint(20, size=100).astype("float32")
exe = base.Executor(place)
fetches = exe.run(
base.default_main_program(),
feed={"input_x": x_np},
fetch_list=[result],
)
Expand All @@ -240,6 +241,7 @@ def setUp(self):
if core.is_compiled_with_cuda():
self.places.append(base.CUDAPlace(0))

@test_with_pir_api
def check_static_result(self, place):
with base.program_guard(base.Program(), base.Program()):
paddle.enable_static()
Expand All @@ -256,7 +258,6 @@ def check_static_result(self, place):
x_np = np.random.randint(20, size=100).astype("float32")
exe = base.Executor(place)
fetches = exe.run(
base.default_main_program(),
feed={"input_x": x_np},
fetch_list=[result],
)
Expand All @@ -281,6 +282,7 @@ def setUp(self):
if core.is_compiled_with_cuda():
self.places.append(base.CUDAPlace(0))

@test_with_pir_api
def check_static_result(self, place):
with base.program_guard(base.Program(), base.Program()):
paddle.enable_static()
Expand All @@ -297,7 +299,6 @@ def check_static_result(self, place):
x_np = np.random.randint(20, size=100).astype("float32")
exe = base.Executor(place)
fetches = exe.run(
base.default_main_program(),
feed={"input_x": x_np},
fetch_list=[result],
)
Expand Down Expand Up @@ -347,7 +348,7 @@ def setUp(self):
}

def test_check_output(self):
self.check_output()
self.check_output(check_pir=True)


if __name__ == "__main__":
Expand Down

0 comments on commit 6445c77

Please sign in to comment.