-
Notifications
You must be signed in to change notification settings - Fork 5.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[AMP OP&Test] add fp16/bf16 unittest for pool2d op #52288
Changes from 5 commits
f693d13
4e3041d
4a3478e
fec37e7
6b3f3c7
fe58f1a
67cd615
dedd794
1dccb86
1ed8413
95f8879
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -15,6 +15,7 @@ | |
import unittest | ||
|
||
import numpy as np | ||
from eager_op_test import convert_float_to_uint16 | ||
|
||
from paddle.fluid import core | ||
from paddle.fluid.tests.unittests.op_test import OpTest | ||
|
@@ -304,7 +305,11 @@ def setUp(self): | |
self.init_data_format() | ||
self.init_shape() | ||
|
||
input = np.random.random(self.shape).astype(self.dtype) | ||
if self.is_bfloat16_op(): | ||
input = np.random.random(self.shape).astype(np.float32) | ||
else: | ||
input = np.random.random(self.shape).astype(self.dtype) | ||
|
||
output = pool2D_forward_naive( | ||
input, | ||
self.ksize, | ||
|
@@ -317,8 +322,14 @@ def setUp(self): | |
self.data_format, | ||
self.pool_type, | ||
self.padding_algorithm, | ||
).astype(self.dtype) | ||
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} | ||
) | ||
|
||
if self.is_bfloat16_op(): | ||
output = output.astype(np.float32) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. `output` also needs to be passed through `convert_float_to_uint16`. |
||
self.inputs = {'X': convert_float_to_uint16(input)} | ||
else: | ||
output = output.astype(self.dtype) | ||
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} | ||
|
||
self.attrs = { | ||
'strides': self.strides, | ||
|
@@ -576,6 +587,40 @@ def test_check_grad(self): | |
globals()[cls_name] = TestFp16Case | ||
|
||
|
||
def create_test_bf16_class(parent, check_grad=True):
    """Register a bfloat16 variant of a pool2d OpTest case.

    Derives a new test class from ``parent`` that runs the pool2d kernel
    with the bf16 dtype on CUDA, names it ``<Parent>_Bf16Op``, and
    installs it into ``globals()`` so the unittest loader discovers it.

    Args:
        parent: the base OpTest subclass (an fp32 pool2d test case).
        check_grad: when False, skip the gradient check entirely
            (used for cases whose bf16 backward kernel is unsupported).
    """

    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestBf16Case(parent):
        def init_kernel_type(self):
            self.use_cuda = True
            # np.uint16 is the numpy storage dtype Paddle uses for bfloat16.
            self.dtype = np.uint16

        def test_check_output(self):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                # Per review: rely on the framework's default atol for
                # bf16 instead of hard-coding a looser 1e-3 tolerance.
                self.check_output_with_place(
                    place,
                    check_dygraph=(not self.use_mkldnn),
                )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            # Max pooling's gradient is ill-conditioned at ties, so the
            # grad check only runs for non-max pooling, and only when the
            # caller has not opted out via check_grad=False.
            if self.pool_type != "max" and check_grad:
                self.check_grad_with_place(
                    place,
                    {'X'},
                    'Out',
                    max_relative_error=0.07,
                    check_dygraph=(not self.use_mkldnn),
                )

    cls_name = "{}_{}".format(parent.__name__, "Bf16Op")
    TestBf16Case.__name__ = cls_name
    globals()[cls_name] = TestBf16Case
|
||
|
||
create_test_cudnn_fp16_class(TestPool2D_Op) | ||
create_test_cudnn_fp16_class(TestCase1, check_grad=False) | ||
create_test_cudnn_fp16_class(TestCase2) | ||
|
@@ -590,6 +635,12 @@ def test_check_grad(self): | |
create_test_fp16_class(TestCase4) | ||
create_test_fp16_class(TestCase5) | ||
|
||
create_test_bf16_class(TestPool2D_Op) | ||
create_test_bf16_class(TestCase1, check_grad=False) | ||
create_test_bf16_class(TestCase2) | ||
create_test_bf16_class(TestCase3) | ||
create_test_bf16_class(TestCase4) | ||
create_test_bf16_class(TestCase5) | ||
# --------------------test pool2d use ceil mode-------------------- | ||
|
||
|
||
|
@@ -735,6 +786,20 @@ def init_shape(self): | |
create_test_cudnn_fp16_class(TestCase4_AsyPadding) | ||
create_test_cudnn_fp16_class(TestCase5_AsyPadding) | ||
|
||
create_test_fp16_class(TestPool2D_AsyPadding) | ||
create_test_fp16_class(TestCase1_AsyPadding, check_grad=False) | ||
create_test_fp16_class(TestCase2_AsyPadding) | ||
create_test_fp16_class(TestCase3_AsyPadding) | ||
create_test_fp16_class(TestCase4_AsyPadding) | ||
create_test_fp16_class(TestCase5_AsyPadding) | ||
|
||
create_test_bf16_class(TestPool2D_AsyPadding) | ||
create_test_bf16_class(TestCase1_AsyPadding, check_grad=False) | ||
create_test_bf16_class(TestCase2_AsyPadding) | ||
create_test_bf16_class(TestCase3_AsyPadding) | ||
create_test_bf16_class(TestCase4_AsyPadding) | ||
create_test_bf16_class(TestCase5_AsyPadding) | ||
|
||
create_test_cudnn_use_ceil_class(TestPool2D_AsyPadding) | ||
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding) | ||
|
||
|
@@ -847,6 +912,20 @@ def init_shape(self): | |
create_test_cudnn_fp16_class(TestCase4_channel_last) | ||
create_test_cudnn_fp16_class(TestCase5_channel_last) | ||
|
||
create_test_fp16_class(TestPool2D_channel_last) | ||
create_test_fp16_class(TestCase1_channel_last, check_grad=False) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
|
||
create_test_fp16_class(TestCase2_channel_last) | ||
create_test_fp16_class(TestCase3_channel_last) | ||
create_test_fp16_class(TestCase4_channel_last) | ||
create_test_fp16_class(TestCase5_channel_last) | ||
|
||
create_test_bf16_class(TestPool2D_channel_last) | ||
create_test_bf16_class(TestCase1_channel_last, check_grad=False) | ||
create_test_bf16_class(TestCase2_channel_last) | ||
create_test_bf16_class(TestCase3_channel_last) | ||
create_test_bf16_class(TestCase4_channel_last) | ||
create_test_bf16_class(TestCase5_channel_last) | ||
|
||
create_test_cudnn_use_ceil_class(TestPool2D_channel_last) | ||
create_test_cudnn_use_ceil_class(TestCase1_channel_last) | ||
|
||
|
@@ -964,6 +1043,20 @@ def init_shape(self): | |
create_test_cudnn_fp16_class(TestCase4_AsyPadding_channel_last) | ||
create_test_cudnn_fp16_class(TestCase5_AsyPadding_channel_last) | ||
|
||
create_test_fp16_class(TestPool2D_AsyPadding_channel_last) | ||
create_test_fp16_class(TestCase1_AsyPadding_channel_last, check_grad=False) | ||
create_test_fp16_class(TestCase2_AsyPadding_channel_last) | ||
create_test_fp16_class(TestCase3_AsyPadding_channel_last) | ||
create_test_fp16_class(TestCase4_AsyPadding_channel_last) | ||
create_test_fp16_class(TestCase5_AsyPadding_channel_last) | ||
|
||
create_test_bf16_class(TestPool2D_AsyPadding_channel_last) | ||
create_test_bf16_class(TestCase1_AsyPadding_channel_last, check_grad=False) | ||
create_test_bf16_class(TestCase2_AsyPadding_channel_last) | ||
create_test_bf16_class(TestCase3_AsyPadding_channel_last) | ||
create_test_bf16_class(TestCase4_AsyPadding_channel_last) | ||
create_test_bf16_class(TestCase5_AsyPadding_channel_last) | ||
|
||
create_test_cudnn_use_ceil_class(TestPool2D_AsyPadding_channel_last) | ||
create_test_cudnn_use_ceil_class(TestCase1_AsyPadding_channel_last) | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Remove the redundant import.