From f95d44a24a8cdf1c66f6c8b0468af2ac204008e2 Mon Sep 17 00:00:00 2001
From: arlesniak
Date: Tue, 16 Nov 2021 11:19:04 +0100
Subject: [PATCH] Added BF16 Pool2d grad (#37081)

* Added BF16 Pool2d grad
* upstream pulled
* fix for CI
* fixes after review
---
 .../fluid/operators/mkldnn/pool_mkldnn_op.cc  |  3 +-
 .../mkldnn/test_pool2d_bf16_mkldnn_op.py      | 96 +++++++++++++++++--
 .../fluid/tests/unittests/test_pool2d_op.py   |  8 +-
 3 files changed, 97 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
index 9e437fb15e917..d86bab9d3a42f 100644
--- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
@@ -387,4 +387,5 @@ REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace,
                    ops::PoolMKLDNNOpKernel<paddle::platform::bfloat16>);
 
 REGISTER_OP_KERNEL(pool2d_grad, MKLDNN, ::paddle::platform::CPUPlace,
-                   ops::PoolMKLDNNGradOpKernel<float>);
+                   ops::PoolMKLDNNGradOpKernel<float>,
+                   ops::PoolMKLDNNGradOpKernel<paddle::platform::bfloat16>);
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py
index da37b33d30d5d..5430c1598f84d 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py
@@ -15,22 +15,63 @@
 from __future__ import print_function
 
 import unittest
-import os
 import numpy as np
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
-from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive
+from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16
+from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op_Mixin, max_pool2D_forward_naive
+from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import pool2d_backward_navie as pool2d_backward_naive
 from paddle import enable_static
 
 
-@unittest.skipIf(not core.supports_bfloat16(),
-                 "place does not support BF16 evaluation")
-class TestPoolBf16MklDNNOp(TestPool2D_Op):
+@OpTestTool.skip_if_not_cpu_bf16()
+class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+    def init_data_type(self):
+        self.dtype = np.uint16
+
+    def setUp(self):
+        super(TestPoolBf16MklDNNOpGrad, self).setUp()
+        self.attrs['mkldnn_data_type'] = "bfloat16"
+        self.x_fp32 = np.random.random(self.shape).astype(np.float32)
+
+        output = self.pool2D_forward_naive(
+            self.x_fp32, self.ksize, self.strides, self.paddings,
+            self.global_pool, self.ceil_mode, self.exclusive, self.adaptive,
+            "float32").astype(np.float32)
+
+        self.inputs = {'X': convert_float_to_uint16(self.x_fp32)}
+        self.outputs = {'Out': convert_float_to_uint16(output)}
+
+    def test_check_output(self):
+        self.check_output_with_place(core.CPUPlace())
+
+    def test_check_grad(self):
+        x_grad = pool2d_backward_naive(
+            self.x_fp32,
+            ksize=self.ksize,
+            strides=self.strides,
+            paddings=self.paddings,
+            global_pool=self.global_pool,
+            ceil_mode=False,
+            exclusive=self.exclusive,
+            adaptive=self.adaptive,
+            data_format=self.data_format,
+            pool_type=self.pool_type,
+            padding_algorithm=self.padding_algorithm)
+        x_grad = x_grad / np.prod(self.outputs['Out'].shape)
+        self.check_grad_with_place(
+            core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[x_grad])
+
+
+@OpTestTool.skip_if_not_cpu_bf16()
+class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest):
     def init_kernel_type(self):
         self.use_mkldnn = True
 
     def setUp(self):
-        TestPool2D_Op.setUp(self)
+        TestPool2D_Op_Mixin.setUp(self)
         self.dtype = np.uint16
 
         input = np.random.random(self.shape).astype(np.float32)
@@ -95,6 +136,47 @@ def init_pool_type(self):
         self.pool2D_forward_naive = max_pool2D_forward_naive
 
 
+class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad):
+    def init_test_case(self):
+        self.ksize = [3, 3]
+        self.strides = [1, 1]
+
+    def init_shape(self):
+        self.shape = [2, 3, 7, 7]
+
+    def init_paddings(self):
+        self.paddings = [0, 0]
+
+    def init_global_pool(self):
+        self.global_pool = False
+
+    def init_exclusive(self):
+        self.exclusive = True
+
+
+class TestCase2PadOneNonExclusiveAvgGrad(TestCase1PadZeroExclusiveAvgGrad):
+    def init_exclusive(self):
+        self.exclusive = False
+
+
+class TestCase0InitialMaxGrad(TestPoolBf16MklDNNOpGrad):
+    def init_pool_type(self):
+        self.pool_type = "max"
+        self.pool2D_forward_naive = max_pool2D_forward_naive
+
+
+class TestCase1PadZeroExclusiveMaxGrad(TestCase1PadZeroExclusiveAvgGrad):
+    def init_pool_type(self):
+        self.pool_type = "max"
+        self.pool2D_forward_naive = max_pool2D_forward_naive
+
+
+class TestCase2PadOneNonExclusiveMaxGrad(TestCase2PadOneNonExclusiveAvgGrad):
+    def init_pool_type(self):
+        self.pool_type = "max"
+        self.pool2D_forward_naive = max_pool2D_forward_naive
+
+
 if __name__ == "__main__":
     enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
index d66bdd2948d46..582ec9501068c 100644
--- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
@@ -19,7 +19,7 @@
 import numpy as np
 
 import paddle.fluid.core as core
-from op_test import OpTest
+from paddle.fluid.tests.unittests.op_test import OpTest
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 
@@ -252,7 +252,7 @@ def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
         return out
 
 
-class TestPool2D_Op(OpTest):
+class TestPool2D_Op_Mixin(object):
     def setUp(self):
         self.op_type = "pool2d"
         self.use_cudnn = False
@@ -363,6 +363,10 @@ def init_adaptive(self):
         self.adaptive = False
 
 
+class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest):
+    pass
+
+
 class TestCase1(TestPool2D_Op):
     def init_test_case(self):
         self.ksize = [3, 3]