
【PaddlePaddle Hackathon 4】No.63 fix temporal_shift and conj #51532

Merged · 11 commits · Mar 23, 2023
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/complex_kernel.cu
@@ -26,6 +26,7 @@ PD_REGISTER_KERNEL(conj,
                    ALL_LAYOUT,
                    phi::ConjKernel,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>,
                    float,
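With bfloat16 now registered for the conj kernel, paddle.conj can dispatch to the GPU kernel for bf16 inputs. A minimal usage sketch, assuming a CUDA build of Paddle with bfloat16 support (shapes are illustrative):

import paddle

paddle.set_device("gpu")
# conj of a real-valued tensor is the identity; this now runs on the bf16 kernel
x = paddle.rand([12, 14]).astype(paddle.bfloat16)
y = paddle.conj(x)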
3 changes: 2 additions & 1 deletion paddle/phi/kernels/gpu/temporal_shift_grad_kernel.cu
@@ -146,4 +146,5 @@ PD_REGISTER_KERNEL(temporal_shift_grad,
                    phi::TemporalShiftGradKernel,
                    float,
                    double,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
3 changes: 2 additions & 1 deletion paddle/phi/kernels/gpu/temporal_shift_kernel.cu
@@ -146,4 +146,5 @@ PD_REGISTER_KERNEL(temporal_shift,
                    phi::TemporalShiftKernel,
                    float,
                    double,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
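With both the forward and backward temporal_shift kernels registered for bfloat16, the op can run end to end in bf16. A minimal sketch, assuming a CUDA build with bf16 support:

import paddle
import paddle.nn.functional as F

paddle.set_device("gpu")
# N*T = 6 frames, each (C=4, H=4, W=4), grouped into seg_num=3 segments
x = paddle.rand([6, 4, 4, 4]).astype(paddle.bfloat16)
out = F.temporal_shift(x, seg_num=3, shift_ratio=0.25, data_format="NCHW")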
41 changes: 40 additions & 1 deletion python/paddle/fluid/tests/unittests/test_conj_op.py
@@ -20,9 +20,10 @@
 import paddle

 sys.path.append("..")
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 from numpy.random import random as rand

+import paddle.fluid.core as core
 import paddle.fluid.dygraph as dg
 import paddle.static as static

@@ -147,5 +148,43 @@ def testfp16(self):
         out = exe.run(feed={'x': input_x}, fetch_list=[out])


+class TestConjFP16OP(TestConjOp):
+    def init_dtype_type(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestConjBF16(OpTest):
+    def setUp(self):
+        self.op_type = "conj"
+        self.python_api = paddle.tensor.conj
+        self.init_dtype_type()
+        self.init_input_output()
+
+    def init_dtype_type(self):
+        self.dtype = np.uint16
+
+    def init_input_output(self):
+        x = (
+            np.random.random((12, 14)) + 1j * np.random.random((12, 14))
+        ).astype(np.float32)
+        out = np.conj(x)
+
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': convert_float_to_uint16(out)}
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)
+
+    def test_check_grad(self):
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out')
+
+
 if __name__ == "__main__":
     unittest.main()
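The bf16 tests route inputs through convert_float_to_uint16 because OpTest represents bfloat16 arrays as uint16 bit patterns (hence self.dtype = np.uint16). Conceptually, bfloat16 keeps the upper 16 bits of an IEEE float32; a numpy sketch of that conversion (not the test utility's actual source):

import numpy as np

def float32_to_bf16_bits(x):
    # bfloat16 keeps the sign, exponent, and top 7 mantissa bits of float32,
    # i.e. the upper 16 bits of the 32-bit pattern
    x = np.ascontiguousarray(x, dtype=np.float32)
    return (x.view(np.uint32) >> 16).astype(np.uint16)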
57 changes: 55 additions & 2 deletions python/paddle/fluid/tests/unittests/test_temporal_shift_op.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, convert_float_to_uint16

 import paddle
 from paddle.fluid import core

@@ -44,6 +44,7 @@ def temporal_shift(x, seg_num, shift_ratio, data_format):
 class TestTemporalShift(OpTest):
     def setUp(self):
         self.initTestCase()
+        self.init_dtype()
         self.op_type = 'temporal_shift'
         self.python_api = paddle.nn.functional.temporal_shift
         x = np.random.random(self.x_shape).astype(self.dtype)

@@ -64,6 +65,9 @@ def setUp(self):
         self.outputs = {"Out": output}
         self.python_out_sig = ["Out"]

+    def init_dtype(self):
+        self.dtype = 'float64'
+
     def test_check_output(self):
         self.check_output(check_eager=True)

@@ -74,7 +78,6 @@ def initTestCase(self):
         self.x_shape = (6, 4, 4, 4)
         self.seg_num = 3
         self.shift_ratio = 0.25
-        self.dtype = 'float64'
         self.data_format = 'NCHW'

@@ -174,6 +177,56 @@ def attr_data_format():
         self.assertRaises(ValueError, attr_data_format)


+class TestTemporalShiftFP16OP(TestTemporalShift):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestTemporalShiftBF16(OpTest):
+    def initTestCase(self):
+        self.x_shape = (3, 10, 5, 5)
+        self.seg_num = 1
+        self.shift_ratio = 0.3
+        self.dtype = np.uint16
+        self.data_format = 'NCHW'
+
+    def setUp(self):
+        self.initTestCase()
+        self.op_type = 'temporal_shift'
+        self.python_api = paddle.nn.functional.temporal_shift
+
+        x = np.random.random(self.x_shape).astype(np.float32)
+
+        self.attrs = {
+            "seg_num": self.seg_num,
+            "shift_ratio": self.shift_ratio,
+            "data_format": self.data_format,
+        }
+
+        self.inputs = {
+            "X": convert_float_to_uint16(x),
+        }
+
+        output = temporal_shift(
+            x, self.seg_num, self.shift_ratio, self.data_format
+        )
+        self.outputs = {"Out": convert_float_to_uint16(output)}
+        self.python_out_sig = ["Out"]
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)
+
+    def test_check_grad_ignore_uv(self):
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out')
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
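For reference, the test module's temporal_shift helper (collapsed out of this diff view) computes the standard TSM shift: viewed as (N, T, C, H, W), the first fold of channels shifts backward in time, the second fold shifts forward, and out-of-range frames are zero-filled. An equivalent numpy sketch for NCHW input (the helper itself works via padding and slicing, and sizes the second fold slightly differently when c * shift_ratio is not an integer):

import numpy as np

def temporal_shift_ref(x, seg_num, shift_ratio):
    # x: (N*T, C, H, W) -> view as (N, T, C, H, W)
    nt, c, h, w = x.shape
    x = x.reshape(-1, seg_num, c, h, w)
    fold = int(c * shift_ratio)
    out = np.zeros_like(x)
    out[:, :-1, :fold] = x[:, 1:, :fold]                  # first fold: shift backward in time
    out[:, 1:, fold:2 * fold] = x[:, :-1, fold:2 * fold]  # second fold: shift forward in time
    out[:, :, 2 * fold:] = x[:, :, 2 * fold:]             # remaining channels: unshifted
    return out.reshape(nt, c, h, w)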