【PaddlePaddle Hackathon 4】No.63 add fp16 and bf16 for eye and frame #51819

Merged: 12 commits, Apr 6, 2023
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/eye_kernel.cu
@@ -13,7 +13,6 @@
// limitations under the License.

#include "paddle/phi/kernels/eye_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/eye_kernel_impl.h"
@@ -26,4 +25,5 @@ PD_REGISTER_KERNEL(eye,
double,
int64_t,
int,
-phi::dtype::float16) {}
+phi::dtype::float16,
+phi::dtype::bfloat16) {}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/frame_grad_kernel.cu
@@ -13,7 +13,6 @@
// limitations under the License.

#include "paddle/phi/kernels/frame_grad_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/complex.h"
#include "paddle/phi/core/kernel_registry.h"
@@ -28,5 +27,6 @@ PD_REGISTER_KERNEL(frame_grad,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/frame_kernel.cu
@@ -28,5 +28,6 @@ PD_REGISTER_KERNEL(frame,
float,
double,
phi::dtype::float16,
+phi::dtype::bfloat16,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
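Similarly, a hedged sketch of the frame op at reduced precision; frame_length and hop_length mirror the values used in the tests below, and a CUDA build is assumed:

```python
import paddle

# Sketch: split a 1-D signal of length 150 into overlapping frames in float16.
paddle.set_device('gpu')
x = paddle.randn([150]).astype('float16')
frames = paddle.signal.frame(x, frame_length=50, hop_length=15, axis=-1)
# With axis=-1 the result is [frame_length, num_frames], where
# num_frames = 1 + (150 - 50) // 15 = 7, i.e. shape [50, 7].
print(frames.shape)
```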
52 changes: 46 additions & 6 deletions python/paddle/fluid/tests/unittests/test_eye_op.py
@@ -21,29 +21,40 @@

import paddle
from paddle import fluid
-from paddle.fluid import framework
+from paddle.fluid import core, framework
from paddle.fluid.framework import Program, program_guard


class TestEyeOp(OpTest):
    def setUp(self):
        '''
-        Test eye op with specified shape
+        Test eye op with default shape
        '''
        self.python_api = paddle.eye
        self.op_type = "eye"
+        self.init_dtype()
+        self.init_attrs()

        self.inputs = {}
        self.attrs = {
-            'num_rows': 219,
-            'num_columns': 319,
-            'dtype': framework.convert_np_dtype_to_dtype_(np.int32),
+            'num_rows': self.num_rows,
+            'num_columns': self.num_columns,
+            'dtype': framework.convert_np_dtype_to_dtype_(self.dtype),
        }
-        self.outputs = {'Out': np.eye(219, 319, dtype=np.int32)}
+        self.outputs = {
+            'Out': np.eye(self.num_rows, self.num_columns, dtype=self.dtype)
+        }

    def test_check_output(self):
        self.check_output()

+    def init_dtype(self):
+        self.dtype = np.int32
+
+    def init_attrs(self):
+        self.num_rows = 319
+        self.num_columns = 319


class TestEyeOp1(OpTest):
    def setUp(self):
@@ -178,6 +189,35 @@ def test_error(self):
            paddle.eye(-1)


class TestEyeFP16OP(TestEyeOp):
    '''Test eye op with specified dtype'''

    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or does not support bfloat16",
)
class TestEyeBF16OP(OpTest):
    def setUp(self):
        self.op_type = "eye"
        self.dtype = np.uint16
        self.python_api = paddle.eye
        self.inputs = {}
        self.attrs = {
            'num_rows': 219,
            'num_columns': 319,
        }
        self.outputs = {'Out': np.eye(219, 319)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)


if __name__ == "__main__":
paddle.enable_static()
unittest.main()
71 changes: 61 additions & 10 deletions python/paddle/fluid/tests/unittests/test_frame_op.py
@@ -15,10 +15,11 @@
import unittest

import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
from numpy.lib.stride_tricks import as_strided

import paddle
+from paddle.fluid import core


def frame_from_librosa(x, frame_length, hop_length, axis=-1):
@@ -48,23 +49,28 @@ class TestFrameOp(OpTest):
    def setUp(self):
        self.op_type = "frame"
        self.python_api = paddle.signal.frame
-        self.shape, self.type, self.attrs = self.initTestCase()
-        self.inputs = {
-            'X': np.random.random(size=self.shape).astype(self.type),
-        }
+        self.init_dtype()
+        self.init_shape()
+        self.init_attrs()
+
+        self.inputs = {'X': np.random.random(size=self.shape).astype(self.dtype)}
        self.outputs = {
            'Out': frame_from_librosa(x=self.inputs['X'], **self.attrs)
        }

-    def initTestCase(self):
-        input_shape = (150,)
-        input_type = 'float64'
-        attrs = {
+    def init_dtype(self):
+        self.dtype = 'float64'
+
+    def init_shape(self):
+        self.shape = (150,)
+
+    def init_attrs(self):
+        self.attrs = {
            'frame_length': 50,
            'hop_length': 15,
            'axis': -1,
        }
-        return input_shape, input_type, attrs

    def test_check_output(self):
        paddle.enable_static()
@@ -137,5 +143,50 @@ def initTestCase(self):
        return input_shape, input_type, attrs


class TestFrameFP16OP(TestFrameOp):
    def init_dtype(self):
        self.dtype = np.float16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or does not support bfloat16",
)
class TestFrameBF16OP(OpTest):
    def setUp(self):
        self.op_type = "frame"
        self.python_api = paddle.signal.frame
        self.shape, self.dtype, self.attrs = self.initTestCase()
[Review comment from ZzSean (Contributor): This doesn't seem to set self.dtype = uint16?]

[Reply from the author: @ZzSean It does; initTestCase() returns np.uint16 (screenshot of initTestCase attached). A sketch of this bf16-as-uint16 packing convention follows this test class.]
        x = np.random.random(size=self.shape).astype(np.float32)
        out = frame_from_librosa(x, **self.attrs).copy()
        self.inputs = {
            'X': convert_float_to_uint16(x),
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def initTestCase(self):
        input_shape = (150,)
        input_dtype = np.uint16
        attrs = {
            'frame_length': 50,
            'hop_length': 15,
            'axis': -1,
        }
        return input_shape, input_dtype, attrs

    def test_check_output(self):
        paddle.enable_static()
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)
        paddle.disable_static()

    def test_check_grad_normal(self):
        paddle.enable_static()
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')
        paddle.disable_static()
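As background for the review thread above: OpTest bfloat16 cases conventionally carry bf16 data as np.uint16 (the raw upper 16 bits of a float32), which is why initTestCase() returns np.uint16 and the inputs go through convert_float_to_uint16. Below is a minimal numpy-only sketch of that packing; it is illustrative, not the actual convert_float_to_uint16 implementation from eager_op_test:

```python
import numpy as np

def bf16_pack_sketch(arr):
    # bfloat16 keeps the upper 16 bits of an IEEE-754 float32, so pack by
    # rounding away the lower 16 bits (round half to even), then shift right.
    bits = arr.astype(np.float32).view(np.uint32)
    rounding = 0x7FFF + ((bits >> 16) & 1)
    return ((bits + rounding) >> 16).astype(np.uint16)

x = np.array([1.0, 0.5], dtype=np.float32)
print(bf16_pack_sketch(x))  # [16256 16128], i.e. [0x3F80, 0x3F00]
```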


if __name__ == '__main__':
    unittest.main()