【PIR API adaptor No.17、33、72、101、121、125】Migrate paddle.atan2, paddle.deg2rad, paddle.floor_divide, paddle.heaviside, paddle.kron, paddle.lerp into pir (PaddlePaddle#58718)
enkilee authored Nov 9, 2023
1 parent 891b4dd commit e61586e
Showing 6 changed files with 66 additions and 41 deletions.
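
All six API changes in python/paddle/tensor/math.py below share one dispatch pattern: the branch guarded by in_dynamic_mode() becomes in_dynamic_or_pir_mode(), so a single _C_ops call now serves both eager execution and the new PIR program path, while the LayerHelper branch remains the fallback for old-IR static graphs. A minimal sketch of that pattern, using a hypothetical operator foo (the explicit import of in_dynamic_or_pir_mode is an assumption made here for self-containedness; inside math.py it is already in scope):

from paddle import _C_ops
from paddle.base.framework import in_dynamic_or_pir_mode  # assumed import path
from paddle.common_ops_import import LayerHelper


def foo(x, y, name=None):
    # Eager mode and PIR static graphs share the single _C_ops entry point.
    if in_dynamic_or_pir_mode():
        return _C_ops.foo(x, y)  # hypothetical op, for illustration only
    # Legacy (pre-PIR) static graphs still build the op through LayerHelper.
    helper = LayerHelper('foo', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='foo', inputs={'X': x, 'Y': y}, outputs={'Out': out})
    return out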
14 changes: 7 additions & 7 deletions python/paddle/tensor/math.py
@@ -19,7 +19,7 @@
 import numpy as np
 
 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.base.libpaddle import DataType
 from paddle.common_ops_import import VarDesc, dygraph_utils
 from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
@@ -3860,8 +3860,8 @@ def kron(x, y, name=None):
             [12, 15, 18, 16, 20, 24],
             [21, 24, 27, 28, 32, 36]])
     """
-    if in_dynamic_mode():
-        return _legacy_C_ops.kron(x, y)
+    if in_dynamic_or_pir_mode():
+        return _C_ops.kron(x, y)
     else:
         helper = LayerHelper('kron', **locals())
         check_variable_and_dtype(
@@ -5149,7 +5149,7 @@ def atan2(x, y, name=None):
     """
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.atan2(x, y)
     else:
         check_variable_and_dtype(
@@ -5279,7 +5279,7 @@ def lerp(x, y, weight, name=None):
     if isinstance(weight, float):
         weight = paddle.full(shape=[], fill_value=weight, dtype=x.dtype)
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
        return _C_ops.lerp(x, y, weight)
     else:
         check_variable_and_dtype(
@@ -5478,7 +5478,7 @@ def deg2rad(x, name=None):
            3.14159274)
     """
     deg2rad_scale = np.pi / 180.0
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, deg2rad_scale, 0.0, True)
@@ -6026,7 +6026,7 @@ def heaviside(x, y, name=None):
            [[0.        , 0.20000000, 1.        ],
             [0.        , 1.        , 0.30000001]])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.heaviside(x, y)
     else:
         op_type = 'elementwise_heaviside'
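
The migration is transparent at the Python API level; a quick eager-mode smoke check of two of the touched APIs (assuming a build that includes this commit):

import numpy as np

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
y = paddle.to_tensor([[0.5, 1.0], [1.0, 0.5]])

# kron and atan2 now dispatch through _C_ops in both dygraph and PIR mode.
np.testing.assert_allclose(
    paddle.kron(x, y).numpy(), np.kron(x.numpy(), y.numpy())
)
np.testing.assert_allclose(
    paddle.atan2(y, x).numpy(), np.arctan2(y.numpy(), x.numpy())
)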
20 changes: 16 additions & 4 deletions test/legacy_test/test_atan2_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 np.random.seed(0)
@@ -45,10 +46,12 @@ def setUp(self):
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
-        self.check_grad(['X1', 'X2'], 'Out', check_cinn=self.check_cinn)
+        self.check_grad(
+            ['X1', 'X2'], 'Out', check_cinn=self.check_cinn, check_pir=True
+        )
 
     def test_check_output(self):
-        self.check_output(check_cinn=self.check_cinn)
+        self.check_output(check_cinn=self.check_cinn, check_pir=True)
 
     def init_dtype(self):
         self.dtype = np.float64
@@ -69,6 +72,7 @@ def test_check_grad(self):
                 1 / self.inputs['X1'].size,
             ),
             check_cinn=self.check_cinn,
+            check_pir=True,
         )
 
 
@@ -100,6 +104,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.place.append(paddle.CUDAPlace(0))
 
+    @test_with_pir_api
     def test_static_api(self):
         paddle.enable_static()
 
@@ -154,16 +159,23 @@ def setUp(self):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, check_cinn=self.check_cinn)
+        self.check_output_with_place(
+            place, check_cinn=self.check_cinn, check_pir=True
+        )
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X1', 'X2'], 'Out', check_cinn=self.check_cinn
+            place,
+            ['X1', 'X2'],
+            'Out',
+            check_cinn=self.check_cinn,
+            check_pir=True,
         )
 
 
 class TestAtan2Error(unittest.TestCase):
+    @test_with_pir_api
     def test_mismatch(self):
         paddle.enable_static()
 
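
The test-side changes follow two recurring patterns: OpTest checks gain check_pir=True so each operator is also verified under the PIR executor, and Python-API tests that build static programs are wrapped with @test_with_pir_api so the body runs under both the legacy program and PIR. A condensed sketch of the second pattern (the test class, shapes, and inputs here are illustrative, not taken from the diff):

import unittest

import numpy as np

import paddle
from paddle.pir_utils import test_with_pir_api


class TestAtan2StaticSketch(unittest.TestCase):
    # Illustrative only: the decorator re-runs this test under the PIR
    # program path in addition to the legacy static-graph path.
    @test_with_pir_api
    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('x', [3], dtype='float64')
            y = paddle.static.data('y', [3], dtype='float64')
            out = paddle.atan2(x, y)
            exe = paddle.static.Executor(paddle.CPUPlace())
            x_np = np.array([1.0, -1.0, 0.5])
            y_np = np.array([1.0, 1.0, -2.0])
            (res,) = exe.run(feed={'x': x_np, 'y': y_np}, fetch_list=[out])
            np.testing.assert_allclose(res, np.arctan2(x_np, y_np))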
4 changes: 3 additions & 1 deletion test/legacy_test/test_elementwise_floordiv_op.py
@@ -45,7 +45,7 @@ def setUp(self):
         self.outputs = {'Out': self.out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def init_input_output(self):
         self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
@@ -105,13 +105,15 @@ def device_guard(device=None):
 
 class TestFloorDivideOp(unittest.TestCase):
     def test_name(self):
+        paddle.enable_static()
         with paddle_static_guard():
             with base.program_guard(base.Program()):
                 x = paddle.static.data(name="x", shape=[2, 3], dtype="int64")
                 y = paddle.static.data(name='y', shape=[2, 3], dtype='int64')
 
                 y_1 = paddle.floor_divide(x, y, name='div_res')
                 self.assertEqual(('div_res' in y_1.name), True)
+        paddle.disable_static()
 
     def test_dygraph(self):
         paddle.disable_static()
16 changes: 10 additions & 6 deletions test/legacy_test/test_elementwise_heaviside_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 def Heaviside_grad(x, y, dout, astype="float16", is_bfloat16=False):
@@ -41,16 +42,16 @@ def setUp(self):
         self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
+        self.check_grad(['Y'], 'Out', no_grad_set=set("X"), check_pir=True)
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_pir=True)
 
 
 class TestHeavisideBroadcast(unittest.TestCase):
@@ -98,6 +99,7 @@ def setUp(self):
         self.out_np = np.heaviside(self.x_np, self.y_np)
         self.dtype = "float64"
 
+    @test_with_pir_api
     def test_static(self):
         for use_cuda in (
             [False, True] if paddle.device.is_compiled_with_cuda() else [False]
@@ -177,7 +179,7 @@ def setUp(self):
         self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.check_grad(
@@ -186,6 +188,7 @@ def test_check_grad(self):
             user_defined_grads=Heaviside_grad(
                 self.inputs['X'], self.inputs['Y'], 1 / self.inputs['X'].size
             ),
+            check_pir=True,
         )
 
 
@@ -212,7 +215,7 @@ def setUp(self):
         self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out'])
 
     def test_check_output(self):
-        self.check_output_with_place(self.place)
+        self.check_output_with_place(self.place, check_pir=True)
 
     def test_check_grad(self):
         self.check_grad_with_place(
@@ -226,6 +229,7 @@ def test_check_grad(self):
                 self.np_dtype,
                 True,
             ),
+            check_pir=True,
         )
 
44 changes: 24 additions & 20 deletions test/legacy_test/test_kron_op.py
@@ -21,6 +21,7 @@
 import paddle.base.dygraph as dg
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 class TestKronOp(OpTest):
@@ -38,16 +39,16 @@ def _init_dtype(self):
         return "float64"
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
     def test_check_grad_ignore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set('X'))
+        self.check_grad(['Y'], 'Out', no_grad_set=set('X'), check_pir=True)
 
     def test_check_grad_ignore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_pir=True)
 
 
 class TestKronOp2(TestKronOp):
@@ -102,51 +103,51 @@ def setUp(self):
         self.place = core.CUDAPlace(0)
 
     def test_check_output(self):
-        self.check_output_with_place(self.place)
+        self.check_output_with_place(self.place, check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
+        self.check_grad_with_place(
+            self.place, ['X', 'Y'], 'Out', check_pir=True
+        )
 
     def test_check_grad_ignore_x(self):
         self.check_grad_with_place(
-            self.place, ['Y'], 'Out', no_grad_set=set('X')
+            self.place, ['Y'], 'Out', no_grad_set=set('X'), check_pir=True
         )
 
     def test_check_grad_ignore_y(self):
         self.check_grad_with_place(
-            self.place, ['X'], 'Out', no_grad_set=set('Y')
+            self.place, ['X'], 'Out', no_grad_set=set('Y'), check_pir=True
         )
 
 
 class TestKronLayer(unittest.TestCase):
     def test_case(self):
         a = np.random.randn(10, 10).astype(np.float64)
         b = np.random.randn(10, 10).astype(np.float64)
 
         place = base.CPUPlace()
         with dg.guard(place):
             a_var = dg.to_variable(a)
             b_var = dg.to_variable(b)
             c_var = paddle.kron(a_var, b_var)
             np.testing.assert_allclose(c_var.numpy(), np.kron(a, b))
 
+    @test_with_pir_api
     def test_case_with_output(self):
+        place = base.CPUPlace()
         a = np.random.randn(10, 10).astype(np.float64)
         b = np.random.randn(10, 10).astype(np.float64)
-
-        main = base.Program()
-        start = base.Program()
+        out_np = np.kron(a, b)
+        paddle.enable_static()
+        prog = paddle.static.Program()
-        with base.unique_name.guard():
-            with base.program_guard(main, start):
+        with paddle.static.program_guard(prog, prog):
             a_var = paddle.static.data("a", [-1, -1], dtype="float64")
             b_var = paddle.static.data("b", [-1, -1], dtype="float64")
             out_var = paddle.kron(a_var, b_var)
 
-        place = base.CPUPlace()
-        exe = base.Executor(place)
-        exe.run(start)
-        (c,) = exe.run(main, feed={'a': a, 'b': b}, fetch_list=[out_var])
-        np.testing.assert_allclose(c, np.kron(a, b))
+        exe = paddle.static.Executor(place=place)
+        (res,) = exe.run(prog, feed={'a': a, 'b': b}, fetch_list=[out_var])
+        np.testing.assert_allclose(res, out_np)
 
 
 class TestComplexKronOp(OpTest):
@@ -179,26 +180,29 @@ def init_input_output(self):
         self.out = np.kron(self.x, self.y)
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
         self.check_grad(
             ['X', 'Y'],
             'Out',
+            check_pir=True,
         )
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
             ['Y'],
             'Out',
             no_grad_set=set("X"),
+            check_pir=True,
        )
 
     def test_check_grad_ingore_y(self):
         self.check_grad(
             ['X'],
             'Out',
             no_grad_set=set('Y'),
+            check_pir=True,
         )
 
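
A note on the rewritten TestKronLayer.test_case_with_output above: the new version passes the same Program to paddle.static.program_guard as both main and startup program and drops the old exe.run(start) warm-up. Since the test creates only feed data and no parameters, there is nothing for a startup program to initialize, and the single self-contained program is presumably what lets @test_with_pir_api re-execute the body unchanged under PIR.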
9 changes: 6 additions & 3 deletions test/legacy_test/test_lerp_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 paddle.enable_static()
 np.random.seed(0)
@@ -52,10 +53,10 @@ def init_wshape(self):
         self.wshape = [1]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
 
 class TestLerpWithDim2(TestLerp):
@@ -139,6 +140,7 @@ def setUp(self):
         if core.is_compiled_with_cuda():
             self.place.append(paddle.CUDAPlace(0))
 
+    @test_with_pir_api
     def test_static_api(self):
         paddle.enable_static()
 
@@ -268,7 +270,7 @@ def init_grad(self, w):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_pir=True)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
@@ -277,6 +279,7 @@ def test_check_grad(self):
             ['X', 'Y'],
             'Out',
             user_defined_grads=[self.x_grad, self.y_grad],
+            check_pir=True,
         )
 
