[PIR]Migrate prod into pir (PaddlePaddle#58105)
0x45f authored Oct 25, 2023
1 parent 1c1f1d2 commit 8d0c452
Showing 2 changed files with 33 additions and 32 deletions.
4 changes: 2 additions & 2 deletions python/paddle/tensor/math.py
@@ -130,7 +130,7 @@ def _get_reduce_axis(axis, x):
 
 
 def _get_reduce_axis_with_tensor(axis, x):
-    if isinstance(axis, Variable):
+    if isinstance(axis, (Variable, paddle.pir.OpResult)):
         if axis.shape[0] == len(x.shape):
             reduce_all = True
         else:
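Editorial note (not part of the diff): the widened isinstance check is needed because a graph tensor built with the legacy static API is a Variable, while the same tensor built under PIR is a paddle.pir.OpResult. A minimal sketch of the legacy side, with illustrative names:

import paddle

paddle.enable_static()

# Sketch (assumed names, not from the commit): on the legacy static-graph
# path a data tensor is a Variable; under PIR the corresponding object is a
# paddle.pir.OpResult, hence the two-type check above.
with paddle.static.program_guard(paddle.static.Program()):
    axis = paddle.static.data(name='axis', shape=[2], dtype='int64')
    print(isinstance(axis, paddle.static.Variable))  # True on the legacy path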
@@ -4538,7 +4538,7 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
             x = cast(x, dtype)
 
     reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.prod(x, axis, keepdim, reduce_all)
     else:
         helper = LayerHelper('reduce_prod', **locals())
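Editorial note: with in_dynamic_or_pir_mode(), eager execution and PIR programs now share the _C_ops.prod call. A minimal eager-mode sanity check (not part of the commit):

import numpy as np
import paddle

# Eager (dynamic) mode sketch: this call goes through the
# in_dynamic_or_pir_mode() branch added above.
x = paddle.to_tensor(np.arange(1, 7, dtype="float32").reshape(2, 3))
out = paddle.prod(x, axis=1)
print(out.numpy())  # [6., 120.] -- row-wise products of [1, 2, 3] and [4, 5, 6]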
61 changes: 31 additions & 30 deletions test/legacy_test/test_prod_op.py
@@ -18,6 +18,7 @@
 from test_sum_op import TestReduceOPTensorAxisBase
 
 import paddle
+from paddle.pir_utils import test_with_pir_api
 
 
 class TestProdOp(unittest.TestCase):
@@ -70,33 +71,35 @@ def run_imperative(self):
             dy_result.numpy(), expected_result, rtol=1e-05
         )
 
+    @test_with_pir_api
     def run_static(self, use_gpu=False):
-        input = paddle.static.data(
-            name='input', shape=[10, 10, 5], dtype='float32'
-        )
-        result0 = paddle.prod(input)
-        result1 = paddle.prod(input, axis=1)
-        result2 = paddle.prod(input, axis=-1)
-        result3 = paddle.prod(input, axis=[0, 1])
-        result4 = paddle.prod(input, axis=1, keepdim=True)
-        result5 = paddle.prod(input, axis=1, dtype='int64')
-        result6 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')
-
-        place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
-        exe = paddle.static.Executor(place)
-        exe.run(paddle.static.default_startup_program())
-        static_result = exe.run(
-            feed={"input": self.input},
-            fetch_list=[
-                result0,
-                result1,
-                result2,
-                result3,
-                result4,
-                result5,
-                result6,
-            ],
-        )
+        with paddle.static.program_guard(paddle.static.Program()):
+            input = paddle.static.data(
+                name='input', shape=[10, 10, 5], dtype='float32'
+            )
+            result0 = paddle.prod(input)
+            result1 = paddle.prod(input, axis=1)
+            result2 = paddle.prod(input, axis=-1)
+            result3 = paddle.prod(input, axis=[0, 1])
+            result4 = paddle.prod(input, axis=1, keepdim=True)
+            result5 = paddle.prod(input, axis=1, dtype='int64')
+            result6 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')
+
+            place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(paddle.static.default_startup_program())
+            static_result = exe.run(
+                feed={"input": self.input},
+                fetch_list=[
+                    result0,
+                    result1,
+                    result2,
+                    result3,
+                    result4,
+                    result5,
+                    result6,
+                ],
+            )
 
         expected_result = np.prod(self.input)
         np.testing.assert_allclose(
@@ -134,8 +137,7 @@ def test_cpu(self):
         self.run_imperative()
         paddle.enable_static()
 
-        with paddle.static.program_guard(paddle.static.Program()):
-            self.run_static()
+        self.run_static()
 
     def test_gpu(self):
         if not paddle.base.core.is_compiled_with_cuda():
@@ -145,8 +147,7 @@ def test_gpu(self):
         self.run_imperative()
         paddle.enable_static()
 
-        with paddle.static.program_guard(paddle.static.Program()):
-            self.run_static(use_gpu=True)
+        self.run_static(use_gpu=True)
 
 
 class TestProdOpError(unittest.TestCase):
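Editorial note: the @test_with_pir_api decorator (from paddle.pir_utils) re-runs the decorated run_static under the new PIR static-graph API in addition to the legacy one, which is why each invocation now builds its graph inside its own program_guard instead of relying on the caller (test_cpu/test_gpu) to set one up. A standalone sketch of that pattern, with illustrative names, not part of the commit:

import numpy as np
import paddle

paddle.enable_static()

# Build and run a small static program the same way the updated test does:
# everything lives inside a fresh Program via program_guard.
with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
    y = paddle.prod(x, axis=1, keepdim=True)

    exe = paddle.static.Executor(paddle.CPUPlace())
    (res,) = exe.run(
        feed={'x': np.full([2, 3], 2.0, dtype='float32')},
        fetch_list=[y],
    )
print(res)  # [[8.], [8.]] -- the product of each row of 2s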
