-
Notifications
You must be signed in to change notification settings - Fork 5.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[AMP OP&Test]stack & unstack ops fp16 bf16 support #50999
Merged
Merged
Changes from 8 commits
Commits
Show all changes
14 commits
Select commit
Hold shift + click to select a range
dac505b
stack fp16 & bf16 support
piDack daf9d17
unstack fp16 support
piDack 7c6041a
unstack bf16 support
piDack 9a63a7a
append stack fp16 ut
piDack 2b499fd
add unstack
piDack 9e8b2f0
recover unstack cpu kernel
piDack 728b962
Merge branch 'develop' of github.com:piDack/Paddle into stacks_fp_bf_…
piDack dbe2d54
fix some issue for unstack ut
piDack 799fe3f
delete unused var
piDack 8d2d515
add check_place
piDack 78b49fa
Merge branch 'develop' into stacks_fp_bf_support
piDack 7406fa1
fix conflict
piDack 2e5f700
Merge branch 'stacks_fp_bf_support' of github.com:piDack/Paddle into …
piDack c05a751
fix inference err
piDack File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -15,9 +15,11 @@ | |
import unittest | ||
|
||
import numpy as np | ||
from op_test import OpTest | ||
from op_test import OpTest, convert_float_to_uint16 | ||
|
||
import paddle | ||
import paddle.fluid as fluid | ||
from paddle.fluid import core | ||
|
||
|
||
class TestUnStackOpBase(OpTest): | ||
|
@@ -64,6 +66,41 @@ def test_check_grad(self): | |
self.check_grad(['X'], self.get_y_names(), check_eager=True) | ||
|
||
|
||
def unstack_grad_fp16(dout): | ||
out_grad = np.ones(dout.shape, dout.dtype) | ||
out_grad = out_grad / np.sum(out_grad) | ||
return [out_grad] | ||
|
||
|
||
class TestUnStackFP16Op(TestUnStackOpBase): | ||
def initParameters(self): | ||
self.dtype = np.float16 | ||
|
||
|
||
class TestStackFP16Op3(TestUnStackOpBase): | ||
def initParameters(self): | ||
self.dtype = np.float16 | ||
self.axis = -1 | ||
|
||
|
||
class TestStackFP16Op4(TestUnStackOpBase): | ||
def initParameters(self): | ||
self.dtype = np.float16 | ||
self.axis = -3 | ||
|
||
|
||
class TestStackFP16Op5(TestUnStackOpBase): | ||
def initParameters(self): | ||
self.dtype = np.float16 | ||
self.axis = 1 | ||
|
||
|
||
class TestStackFP16Op6(TestUnStackOpBase): | ||
def initParameters(self): | ||
self.dtype = np.float16 | ||
self.axis = 2 | ||
|
||
|
||
class TestStackOp3(TestUnStackOpBase): | ||
def initParameters(self): | ||
self.axis = -1 | ||
|
@@ -84,6 +121,70 @@ def initParameters(self): | |
self.axis = 2 | ||
|
||
|
||
@unittest.skipIf( | ||
not core.is_compiled_with_cuda(), "core is not compiled with CUDA" | ||
) | ||
class TestUnStackBF16Op(OpTest): | ||
def initDefaultParameters(self): | ||
self.input_dim = (5, 6, 7) | ||
self.axis = 0 | ||
self.dtype = np.uint16 | ||
|
||
def initParameters(self): | ||
pass | ||
|
||
def get_y_names(self): | ||
y_names = [] | ||
for i in range(self.input_dim[self.axis]): | ||
y_names.append('y{}'.format(i)) | ||
return y_names | ||
|
||
def setUp(self): | ||
self.initDefaultParameters() | ||
self.initParameters() | ||
self.op_type = 'unstack' | ||
self.python_api = paddle.unstack | ||
self.x = np.random.random(size=self.input_dim).astype(np.float32) | ||
outs = np.split(self.x, self.input_dim[self.axis], self.axis) | ||
new_shape = list(self.input_dim) | ||
del new_shape[self.axis] | ||
y_names = self.get_y_names() | ||
tmp = [] | ||
tmp_names = [] | ||
for i in range(self.input_dim[self.axis]): | ||
tmp.append( | ||
( | ||
y_names[i], | ||
np.reshape(convert_float_to_uint16(outs[i]), new_shape), | ||
) | ||
) | ||
tmp_names.append(y_names[i]) | ||
|
||
self.x = convert_float_to_uint16(self.x) | ||
self.python_out_sig = tmp_names | ||
self.inputs = {'X': self.x} | ||
self.outputs = {'Y': tmp} | ||
self.attrs = {'axis': self.axis, 'num': self.input_dim[self.axis]} | ||
|
||
def test_check_output(self): | ||
place = core.CUDAPlace(0) | ||
self.check_output_with_place(place, check_eager=True) | ||
|
||
def test_check_grad(self): | ||
place = core.CUDAPlace(0) | ||
with fluid.dygraph.guard(): | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这里反向的写法最好还是跟TestUnStackOpBase保持一致吧 (Translation: It would be better to keep the backward-pass implementation here consistent with TestUnStackOpBase.) |
||
x = paddle.to_tensor(self.inputs['X']) | ||
x.stop_gradient = False | ||
y = paddle.unstack( | ||
x, axis=self.attrs['axis'], num=self.attrs['num'] | ||
) | ||
dx = paddle.grad(y, x)[0].numpy() | ||
dx_expected = convert_float_to_uint16( | ||
np.ones(self.input_dim, np.float32) | ||
) | ||
np.testing.assert_array_equal(dx, dx_expected) | ||
|
||
|
||
class TestUnstackZeroInputOp(unittest.TestCase): | ||
def unstack_zero_input_static(self): | ||
|
||
|
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
需删除 (Translation: this needs to be deleted.)