Skip to content

Commit

Permalink
Added BF16 Pool2d grad (#37081)
Browse files Browse the repository at this point in the history
* Added BF16 Pool2d grad

* upstream pulled

* fix for CI

* fixes after review
  • Loading branch information
arlesniak authored Nov 16, 2021
1 parent 62ec644 commit f95d44a
Show file tree
Hide file tree
Showing 3 changed files with 97 additions and 10 deletions.
3 changes: 2 additions & 1 deletion paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -387,4 +387,5 @@ REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace,
ops::PoolMKLDNNOpKernel<paddle::platform::bfloat16>);

REGISTER_OP_KERNEL(pool2d_grad, MKLDNN, ::paddle::platform::CPUPlace,
ops::PoolMKLDNNGradOpKernel<float>);
ops::PoolMKLDNNGradOpKernel<float>,
ops::PoolMKLDNNGradOpKernel<paddle::platform::bfloat16>);
Original file line number Diff line number Diff line change
Expand Up @@ -15,22 +15,63 @@
from __future__ import print_function

import unittest
import os
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16
from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op_Mixin, max_pool2D_forward_naive
from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import pool2d_backward_navie as pool2d_backward_naive
from paddle import enable_static


@unittest.skipIf(not core.supports_bfloat16(),
"place does not support BF16 evaluation")
class TestPoolBf16MklDNNOp(TestPool2D_Op):
@OpTestTool.skip_if_not_cpu_bf16()
class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
    """Checks bf16 pool2d forward output and backward gradient on the
    oneDNN CPU kernel, comparing against a NumPy reference implementation."""

    def init_kernel_type(self):
        self.use_mkldnn = True

    def init_data_type(self):
        # bf16 tensors are carried as uint16 in OpTest.
        self.dtype = np.uint16

    def setUp(self):
        super(TestPoolBf16MklDNNOpGrad, self).setUp()
        self.attrs['mkldnn_data_type'] = "bfloat16"
        # Keep an fp32 copy of the input; the reference gradient in
        # test_check_grad is computed from it.
        self.x_fp32 = np.random.random(self.shape).astype(np.float32)
        ref_out = self.pool2D_forward_naive(
            self.x_fp32, self.ksize, self.strides, self.paddings,
            self.global_pool, self.ceil_mode, self.exclusive, self.adaptive,
            "float32").astype(np.float32)
        # The operator itself consumes and produces bf16 data.
        self.inputs = {'X': convert_float_to_uint16(self.x_fp32)}
        self.outputs = {'Out': convert_float_to_uint16(ref_out)}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        naive_kwargs = dict(
            ksize=self.ksize,
            strides=self.strides,
            paddings=self.paddings,
            global_pool=self.global_pool,
            ceil_mode=False,
            exclusive=self.exclusive,
            adaptive=self.adaptive,
            data_format=self.data_format,
            pool_type=self.pool_type,
            padding_algorithm=self.padding_algorithm)
        ref_grad = pool2d_backward_naive(self.x_fp32, **naive_kwargs)
        # Scale by 1/numel(Out) — presumably to match the mean-style loss
        # OpTest uses when seeding the output gradient; TODO confirm.
        ref_grad = ref_grad / np.prod(self.outputs['Out'].shape)
        self.check_grad_with_place(
            core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[ref_grad])


@OpTestTool.skip_if_not_cpu_bf16()
class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest):
def init_kernel_type(self):
    # Route this test's pool2d op through the oneDNN (MKL-DNN) kernel.
    self.use_mkldnn = True

def setUp(self):
TestPool2D_Op.setUp(self)
TestPool2D_Op_Mixin.setUp(self)
self.dtype = np.uint16

input = np.random.random(self.shape).astype(np.float32)
Expand Down Expand Up @@ -95,6 +136,47 @@ def init_pool_type(self):
self.pool2D_forward_naive = max_pool2D_forward_naive


class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad):
    """Grad case: 3x3 kernel, stride 1, zero padding, exclusive averaging,
    non-global pooling over a [2, 3, 7, 7] input."""

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]

    def init_paddings(self):
        self.paddings = [0, 0]

    def init_global_pool(self):
        # Pool over windows, not the whole spatial extent.
        self.global_pool = False

    def init_exclusive(self):
        # Exclusive avg-pool: divisor counts only in-bounds elements.
        self.exclusive = True


class TestCase2PadOneNonExclusiveAvgGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Same as the parent case but with non-exclusive averaging."""

    # NOTE(review): the class name says "PadOne" but paddings are inherited
    # unchanged from the parent — verify whether a padding override is missing.
    def init_exclusive(self):
        self.exclusive = False


class TestCase0InitialMaxGrad(TestPoolBf16MklDNNOpGrad):
    """Base grad case rerun with max pooling instead of the default avg."""

    def init_pool_type(self):
        self.pool_type = "max"
        # Reference forward must match the max pool type.
        self.pool2D_forward_naive = max_pool2D_forward_naive


class TestCase1PadZeroExclusiveMaxGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Zero-padding exclusive case rerun with max pooling."""

    def init_pool_type(self):
        self.pool_type = "max"
        # Reference forward must match the max pool type.
        self.pool2D_forward_naive = max_pool2D_forward_naive


class TestCase2PadOneNonExclusiveMaxGrad(TestCase2PadOneNonExclusiveAvgGrad):
    """Non-exclusive case rerun with max pooling."""

    def init_pool_type(self):
        self.pool_type = "max"
        # Reference forward must match the max pool type.
        self.pool2D_forward_naive = max_pool2D_forward_naive


if __name__ == "__main__":
    # OpTest-based checks run in static-graph mode.
    enable_static()
    unittest.main()
8 changes: 6 additions & 2 deletions python/paddle/fluid/tests/unittests/test_pool2d_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
import numpy as np

import paddle.fluid.core as core
from op_test import OpTest
from paddle.fluid.tests.unittests.op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

Expand Down Expand Up @@ -252,7 +252,7 @@ def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
return out


class TestPool2D_Op(OpTest):
class TestPool2D_Op_Mixin(object):
def setUp(self):
self.op_type = "pool2d"
self.use_cudnn = False
Expand Down Expand Up @@ -363,6 +363,10 @@ def init_adaptive(self):
self.adaptive = False


class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest):
    """Concrete pool2d test: the mixin supplies the test setup/logic and
    OpTest supplies the runnable test harness."""


class TestCase1(TestPool2D_Op):
def init_test_case(self):
self.ksize = [3, 3]
Expand Down

0 comments on commit f95d44a

Please sign in to comment.