
[AMP OP&Test]stack & unstack ops fp16 bf16 support #50999

Merged · 14 commits · Apr 11, 2023
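For context, the change extends paddle.stack and paddle.unstack test coverage to float16 and bfloat16 for AMP. A hedged usage sketch of the two ops (public paddle API; fp16 kernels generally assume a CUDA device; shapes chosen purely for illustration):

import paddle

x = paddle.ones([2, 3], dtype='float16')
y = paddle.stack([x, x], axis=0)          # shape [2, 2, 3], dtype float16
parts = paddle.unstack(y, axis=0, num=2)  # two [2, 3] float16 tensors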
41 changes: 41 additions & 0 deletions python/paddle/fluid/tests/unittests/test_stack_op.py
@@ -102,6 +102,47 @@ def initParameters(self):
        self.input_dim = ()


class TestStackFP16Op(TestStackOpBase):
    def initParameters(self):
        self.dtype = np.float16


class TestStackFP16Op1(TestStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.num_inputs = 8


class TestStackFP16Op2(TestStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.num_inputs = 10


class TestStackFP16Op3(TestStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = -1


class TestStackFP16Op4(TestStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = -4


class TestStackFP16Op5(TestStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = 1


class TestStackFP16Op6(TestStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = 3
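Each FP16 case above only overrides initParameters(); input construction is inherited. A minimal sketch of the pattern TestStackOpBase is assumed to follow (the real base class is defined earlier in this file and is not shown in this diff; all names below are illustrative only):

import numpy as np

class StackBaseSketch:
    def initParameters(self):
        pass  # subclasses override dtype / num_inputs / axis here

    def setUp(self):
        # defaults, then let the subclass tweak them
        self.num_inputs, self.axis, self.dtype = 4, 0, np.float64
        self.initParameters()
        xs = [
            np.random.random((5, 6, 7)).astype(self.dtype)
            for _ in range(self.num_inputs)
        ]
        self.inputs = {'X': [('x{}'.format(i), x) for i, x in enumerate(xs)]}
        self.outputs = {'Y': np.stack(xs, axis=self.axis)}
        self.attrs = {'axis': self.axis}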


class TestStackBF16Op(OpTest):
    def initDefaultParameters(self):
        self.num_inputs = 4
103 changes: 102 additions & 1 deletion python/paddle/fluid/tests/unittests/test_unstack_op.py
@@ -15,9 +15,11 @@
import unittest

import numpy as np
-from op_test import OpTest
+from op_test import OpTest, convert_float_to_uint16

import paddle
import paddle.fluid as fluid
from paddle.fluid import core


class TestUnStackOpBase(OpTest):
@@ -64,6 +66,41 @@ def test_check_grad(self):
        self.check_grad(['X'], self.get_y_names(), check_eager=True)


def unstack_grad_fp16(dout):

Contributor: This needs to be deleted. (The helper is not referenced by any test in this diff.)

    out_grad = np.ones(dout.shape, dout.dtype)
    out_grad = out_grad / np.sum(out_grad)
    return [out_grad]


class TestUnStackFP16Op(TestUnStackOpBase):
    def initParameters(self):
        self.dtype = np.float16


class TestUnStackFP16Op3(TestUnStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = -1


class TestUnStackFP16Op4(TestUnStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = -3


class TestUnStackFP16Op5(TestUnStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = 1


class TestUnStackFP16Op6(TestUnStackOpBase):
    def initParameters(self):
        self.dtype = np.float16
        self.axis = 2

class TestStackOp3(TestUnStackOpBase):
    def initParameters(self):
        self.axis = -1

@@ -84,6 +121,70 @@ def initParameters(self):
        self.axis = 2


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestUnStackBF16Op(OpTest):
    def initDefaultParameters(self):
        self.input_dim = (5, 6, 7)
        self.axis = 0
        self.dtype = np.uint16

    def initParameters(self):
        pass

    def get_y_names(self):
        y_names = []
        for i in range(self.input_dim[self.axis]):
            y_names.append('y{}'.format(i))
        return y_names

    def setUp(self):
        self.initDefaultParameters()
        self.initParameters()
        self.op_type = 'unstack'
        self.python_api = paddle.unstack
        self.x = np.random.random(size=self.input_dim).astype(np.float32)
        outs = np.split(self.x, self.input_dim[self.axis], self.axis)
        new_shape = list(self.input_dim)
        del new_shape[self.axis]
        y_names = self.get_y_names()
        tmp = []
        tmp_names = []
        for i in range(self.input_dim[self.axis]):
            tmp.append(
                (
                    y_names[i],
                    np.reshape(convert_float_to_uint16(outs[i]), new_shape),
                )
            )
            tmp_names.append(y_names[i])

        self.x = convert_float_to_uint16(self.x)
        self.python_out_sig = tmp_names
        self.inputs = {'X': self.x}
        self.outputs = {'Y': tmp}
        self.attrs = {'axis': self.axis, 'num': self.input_dim[self.axis]}
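convert_float_to_uint16 used above packs float32 values into bfloat16 bit patterns, which OpTest stores as np.uint16. A minimal sketch of the assumed conversion (simple truncation; op_test's real helper may round rather than truncate):

import numpy as np

def to_bf16_bits(x):
    # bfloat16 keeps the upper 16 bits of the float32 bit pattern
    x32 = np.ascontiguousarray(x, dtype=np.float32)
    return (x32.view(np.uint32) >> 16).astype(np.uint16)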

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        with fluid.dygraph.guard():

Contributor: It would be better to keep the backward check here consistent with the one in TestUnStackOpBase.

            x = paddle.to_tensor(self.inputs['X'])
            x.stop_gradient = False
            y = paddle.unstack(
                x, axis=self.attrs['axis'], num=self.attrs['num']
            )
            dx = paddle.grad(y, x)[0].numpy()
            dx_expected = convert_float_to_uint16(
                np.ones(self.input_dim, np.float32)
            )
            np.testing.assert_array_equal(dx, dx_expected)
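Following the review comment above, a hedged sketch of what a backward check consistent with TestUnStackOpBase might look like — delegating to OpTest's gradient checker instead of computing grads by hand in dygraph mode (assumes check_grad_with_place accepts the same arguments as the check_grad call earlier in this file):

    def test_check_grad(self):
        # sketch only; mirrors TestUnStackOpBase but pins the CUDA place
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], self.get_y_names(), check_eager=True
        )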


class TestUnstackZeroInputOp(unittest.TestCase):
    def unstack_zero_input_static(self):