[AMP OP&Test] Support float16 in selu #54030

Merged 2 commits on May 23, 2023
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/activation_kernel.cu
@@ -274,4 +274,5 @@ PD_REGISTER_KERNEL(selu,
phi::SeluKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/selu_grad_kernel.cu
@@ -24,4 +24,5 @@ PD_REGISTER_KERNEL(selu_grad,
phi::SeluGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
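
The two hunks above register phi::dtype::float16 for the GPU selu forward and backward kernels, alongside the existing float, double, and bfloat16 registrations; selu itself computes scale * x for x > 0 and scale * alpha * (exp(x) - 1) otherwise. A minimal usage sketch of what the change enables (my own illustration, not part of this PR, assuming a CUDA build of Paddle):

import paddle
import paddle.nn.functional as F

paddle.set_device("gpu")  # the float16 selu kernel is GPU-only

# Half-precision input; without the registrations above, dispatching
# selu on a float16 GPU tensor would hit an unregistered-kernel error.
x = paddle.randn([3, 5, 5, 10]).astype("float16")
x.stop_gradient = False

y = F.selu(x)          # forward pass: phi::SeluKernel for float16
y.sum().backward()     # backward pass: phi::SeluGradKernel for float16
print(y.dtype, x.grad.dtype)

The new SeluTestFP16OP and SeluTestBF16OP cases in the test file below exercise these registrations through OpTest.
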
39 changes: 33 additions & 6 deletions python/paddle/fluid/tests/unittests/test_selu_op.py
@@ -15,7 +15,7 @@
import unittest

import numpy as np
from eager_op_test import OpTest
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
import paddle.nn.functional as F
@@ -43,23 +43,29 @@ def setUp(self):
self.op_type = "selu"
self.python_api = paddle.nn.functional.selu
self.x_shape = [3, 5, 5, 10]
self.dtype = np.float64
self.init_x_shape()
self.init_dtype()

alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946

x = np.random.normal(size=self.x_shape).astype(self.dtype)
if self.dtype == np.uint16:
x = np.random.normal(size=self.x_shape).astype(np.float32)
else:
x = np.random.normal(size=self.x_shape).astype(self.dtype)

# Since selu is not differentiable at zero, avoid random values
# close to zero.
x[np.abs(x) < 0.005] = 0.02

out = ref_selu(x, scale, alpha)

self.inputs = {'X': x}
self.outputs = {'Out': out}
if self.dtype == np.uint16:
self.inputs = {'X': convert_float_to_uint16(x)}
self.outputs = {'Out': convert_float_to_uint16(out)}
else:
self.inputs = {'X': x}
self.outputs = {'Out': out}

self.attrs = {
'alpha': alpha,
@@ -70,7 +76,7 @@ def init_x_shape(self):
pass

def init_dtype(self):
pass
self.dtype = np.float64

def test_check_output(self):
self.check_output()
@@ -79,6 +85,27 @@ def test_check_grad(self):
self.check_grad(['X'], 'Out')


class SeluTestFP16OP(SeluTest):
def init_dtype(self):
self.dtype = np.float16


@unittest.skipIf(
not core.is_compiled_with_cuda()
or not core.is_bfloat16_supported(core.CUDAPlace(0)),
"core is not compiled with CUDA and do not support bfloat16",
)
class SeluTestBF16OP(SeluTest):
def init_dtype(self):
self.dtype = np.uint16

def test_check_output(self):
self.check_output_with_place(core.CUDAPlace(0))

def test_check_grad(self):
self.check_grad_with_place(core.CUDAPlace(0), ['X'], 'Out')


class TestSeluAPI(unittest.TestCase):
# test paddle.nn.SELU, paddle.nn.functional.selu
def setUp(self):
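
A note on the SeluTestBF16OP case above: numpy has no native bfloat16 dtype, so the test marks bfloat16 runs with self.dtype = np.uint16 and packs the float32 reference data into uint16 via convert_float_to_uint16 from eager_op_test. A rough sketch of that packing (my own illustration of the idea; the actual helper may differ in details such as rounding):

import numpy as np

def float32_to_bf16_bits(x):
    # Keep the upper 16 bits of each float32 bit pattern; bfloat16 is
    # float32 with the low 16 mantissa bits dropped, so this is how
    # bfloat16 values are commonly stored in uint16 arrays for testing.
    bits = np.ascontiguousarray(x, dtype=np.float32).view(np.uint32)
    return np.right_shift(bits, 16).astype(np.uint16)

# 1.0 in float32 is 0x3F800000, so its bfloat16 bit pattern is 0x3F80.
print(hex(int(float32_to_bf16_bits(np.array([1.0], dtype=np.float32))[0])))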