From 38aff432c53c55d3186ed8b75c41a1170eec970b Mon Sep 17 00:00:00 2001
From: WangZhen <23097963+0x45f@users.noreply.github.com>
Date: Thu, 2 Nov 2023 14:22:52 +0800
Subject: [PATCH] [PIR]Migrate Conv2DTranspose into pir (#58416)

---
 .../pir/dialect/op_generator/op_build_gen.py  |  1 +
 python/paddle/nn/functional/conv.py           |  6 +-
 test/legacy_test/test_conv1d_layer.py         |  1 -
 .../test_conv2d_transpose_layer.py            | 26 +++++---
 test/legacy_test/test_conv2d_transpose_op.py  | 62 +++++++++++++++----
 .../mkldnn/test_conv2d_transpose_mkldnn_op.py | 13 ++++
 6 files changed, 83 insertions(+), 26 deletions(-)

diff --git a/paddle/fluid/pir/dialect/op_generator/op_build_gen.py b/paddle/fluid/pir/dialect/op_generator/op_build_gen.py
index 86cae425e9d14e..833990a4e22773 100644
--- a/paddle/fluid/pir/dialect/op_generator/op_build_gen.py
+++ b/paddle/fluid/pir/dialect/op_generator/op_build_gen.py
@@ -22,6 +22,7 @@
     'ReshapeWithXShapeInferMeta',
     'SliceRawInferMeta',
     'StackInferMeta',
+    'Conv2dTransposeInferMeta',
 }
 
 _PREPARE_DATA_WITH_VECTOR_INT64_MTTABLE_ATTRIBUTE = {'FrobeniusNormOp'}
diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py
index c1e1d8f8137dbb..4b77912b463c45 100644
--- a/python/paddle/nn/functional/conv.py
+++ b/python/paddle/nn/functional/conv.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from paddle import _C_ops, _legacy_C_ops, get_flags, in_dynamic_mode
+from paddle import _C_ops, _legacy_C_ops, get_flags, in_dynamic_mode, pir
 from paddle.base.framework import _global_flags, in_dynamic_or_pir_mode
 from paddle.device import (
     get_all_custom_device_type,
@@ -1241,7 +1241,7 @@ def conv2d_transpose(
             output_size = convert_to_list(output_size, 2, 'output_size')
         elif isinstance(output_size, int):
             output_size = convert_to_list(output_size, 2, 'output_size')
-        elif isinstance(output_size, Variable):
+        elif isinstance(output_size, (Variable, pir.OpResult)):
             check_dtype(
                 output_size.dtype,
                 'output_size',
@@ -1273,7 +1273,7 @@ def conv2d_transpose(
         op_type = 'depthwise_conv2d_transpose'
         use_cudnn = False
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         op = (
             _C_ops.conv2d_transpose
             if op_type == 'conv2d_transpose'
diff --git a/test/legacy_test/test_conv1d_layer.py b/test/legacy_test/test_conv1d_layer.py
index 8c2264b1604b17..48bd182c486b97 100644
--- a/test/legacy_test/test_conv1d_layer.py
+++ b/test/legacy_test/test_conv1d_layer.py
@@ -121,7 +121,6 @@ def functional(self, place):
             feed_dict["bias"] = self.bias
         exe = base.Executor(place)
         exe.run(start)
-        # breakpoint()
         (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var])
         return y_np
 
diff --git a/test/legacy_test/test_conv2d_transpose_layer.py b/test/legacy_test/test_conv2d_transpose_layer.py
index 78634d51249297..6f4a5bb3868c79 100644
--- a/test/legacy_test/test_conv2d_transpose_layer.py
+++ b/test/legacy_test/test_conv2d_transpose_layer.py
@@ -143,9 +143,12 @@ def functional(self, place):
             w_var = paddle.static.data(
                 "weight", self.weight_shape, dtype=self.dtype
             )
-            b_var = paddle.static.data(
-                "bias", (self.num_filters,), dtype=self.dtype
-            )
+            if not self.no_bias:
+                b_var = paddle.static.data(
+                    "bias", (self.num_filters,), dtype=self.dtype
+                )
+            else:
+                b_var = None
 
             if self.output_padding != 0:
                 output_size = None
@@ -155,7 +158,7 @@
             y_var = F.conv2d_transpose(
                 x_var,
                 w_var,
-                None if self.no_bias else b_var,
+                b_var,
                 output_size=output_size,
                 padding=self.padding,
                 output_padding=self.output_padding,
@@ -199,8 +202,6 @@ def paddle_nn_layer(self):
         return y_np
 
     def _test_equivalence(self, place):
-        place = base.CPUPlace()
-
         result1 = self.base_layer(place)
         result2 = self.functional(place)
 
@@ -210,13 +211,18 @@ def _test_equivalence(self, place):
         np.testing.assert_array_almost_equal(result1, result2)
         np.testing.assert_array_almost_equal(result2, result3)
 
+    def _test_pir_equivalence(self, place):
+        with paddle.pir_utils.IrGuard():
+            result1 = self.functional(place)
+        with dg.guard(place):
+            result2 = self.paddle_nn_layer()
+
+        np.testing.assert_array_almost_equal(result1, result2)
+
     def runTest(self):
         place = base.CPUPlace()
         self._test_equivalence(place)
-
-        if base.core.is_compiled_with_cuda():
-            place = base.CUDAPlace(0)
-            self._test_equivalence(place)
+        self._test_pir_equivalence(place)
 
 
 class Conv2DTransposeErrorTestCase(Conv2DTransposeTestCase):
diff --git a/test/legacy_test/test_conv2d_transpose_op.py b/test/legacy_test/test_conv2d_transpose_op.py
index ef610d3af05167..339ef086d7b818 100644
--- a/test/legacy_test/test_conv2d_transpose_op.py
+++ b/test/legacy_test/test_conv2d_transpose_op.py
@@ -227,10 +227,15 @@ def test_check_output(self):
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_output_with_place(
-                place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
+                place,
+                atol=1e-5,
+                check_dygraph=(not self.use_mkldnn),
+                check_pir=True,
             )
         else:
-            self.check_output(check_dygraph=(not self.use_mkldnn))
+            self.check_output(
+                check_dygraph=(not self.use_mkldnn), check_pir=True
+            )
 
     def test_check_grad_no_input(self):
         if self.need_check_grad:
@@ -242,19 +247,28 @@ def test_check_grad_no_input(self):
                 'Output',
                 max_relative_error=0.02,
                 no_grad_set={'Input'},
+                check_pir=True,
             )
         else:
-            self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})
+            self.check_grad(
+                ['Filter'], 'Output', no_grad_set={'Input'}, check_pir=True
+            )
 
     def test_check_grad_no_filter(self):
         if self.need_check_grad:
             if self.use_cudnn:
                 place = core.CUDAPlace(0)
                 self.check_grad_with_place(
-                    place, ['Input'], 'Output', no_grad_set={'Filter'}
+                    place,
+                    ['Input'],
+                    'Output',
+                    no_grad_set={'Filter'},
+                    check_pir=True,
                 )
             else:
-                self.check_grad(['Input'], 'Output', no_grad_set={'Filter'})
+                self.check_grad(
+                    ['Input'], 'Output', no_grad_set={'Filter'}, check_pir=True
+                )
 
     def test_check_grad(self):
         if self.need_check_grad:
@@ -265,10 +279,14 @@ def test_check_grad(self):
                 {'Input', 'Filter'},
                 'Output',
                 max_relative_error=0.02,
+                check_pir=True,
             )
         else:
             self.check_grad(
-                {'Input', 'Filter'}, 'Output', max_relative_error=0.02
+                {'Input', 'Filter'},
+                'Output',
+                max_relative_error=0.02,
+                check_pir=True,
             )
 
     def init_test_case(self):
@@ -781,10 +799,15 @@ def test_check_output(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
             self.check_output_with_place(
-                place, atol=0.02, check_dygraph=(not self.use_mkldnn)
+                place,
+                atol=0.02,
+                check_dygraph=(not self.use_mkldnn),
+                check_pir=True,
             )
         else:
-            self.check_output(check_dygraph=(not self.use_mkldnn))
+            self.check_output(
+                check_dygraph=(not self.use_mkldnn), check_pir=True
+            )
 
     def test_check_grad_no_input(self):
         if self.need_check_grad:
@@ -797,9 +820,12 @@ def test_check_grad_no_input(self):
                 'Output',
                 max_relative_error=0.02,
                 no_grad_set={'Input'},
+                check_pir=True,
            )
         else:
-            self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})
+            self.check_grad(
+                ['Filter'], 'Output', no_grad_set={'Input'}, check_pir=True
+            )
 
     def test_check_grad_no_filter(self):
         if self.need_check_grad:
@@ -812,9 +838,12 @@
                 'Output',
max_relative_error=0.02, no_grad_set={'Filter'}, + check_pir=True, ) else: - self.check_grad(['Input'], 'Output', no_grad_set={'Filter'}) + self.check_grad( + ['Input'], 'Output', no_grad_set={'Filter'}, check_pir=True + ) def test_check_grad(self): if self.need_check_grad: @@ -826,10 +855,14 @@ def test_check_grad(self): {'Input', 'Filter'}, 'Output', max_relative_error=0.02, + check_pir=True, ) else: self.check_grad( - {'Input', 'Filter'}, 'Output', max_relative_error=0.02 + {'Input', 'Filter'}, + 'Output', + max_relative_error=0.02, + check_pir=True, ) @@ -965,7 +998,10 @@ def init_op_type(self): def test_check_output(self): place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=0.02, check_dygraph=(not self.use_mkldnn) + place, + atol=0.02, + check_dygraph=(not self.use_mkldnn), + check_pir=True, ) def test_check_grad_no_input(self): @@ -978,6 +1014,7 @@ def test_check_grad_no_input(self): max_relative_error=0.02, no_grad_set={'Input'}, user_defined_grads=[numeric_grads], + check_pir=True, ) def test_check_grad_no_filter(self): @@ -990,6 +1027,7 @@ def test_check_grad_no_filter(self): max_relative_error=0.02, no_grad_set={'Filter'}, user_defined_grads=[numeric_grads], + check_pir=True, ) diff --git a/test/mkldnn/test_conv2d_transpose_mkldnn_op.py b/test/mkldnn/test_conv2d_transpose_mkldnn_op.py index 55fdbefe16c0a3..f5b8a40714d4b8 100644 --- a/test/mkldnn/test_conv2d_transpose_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_transpose_mkldnn_op.py @@ -19,6 +19,7 @@ from test_conv2d_transpose_op import TestConv2DTransposeOp from paddle import enable_static +from paddle.base import core def conv2d_bias_naive(out, bias): @@ -39,6 +40,18 @@ def test_check_grad_no_input(self): def test_check_grad_no_filter(self): return + def test_check_output(self): + # TODO(wangzhongpu): support mkldnn op in dygraph mode + if self.use_cudnn: + place = core.CUDAPlace(0) + self.check_output_with_place( + place, + atol=1e-5, + check_dygraph=(not self.use_mkldnn), + ) + else: + self.check_output(check_dygraph=(not self.use_mkldnn)) + def init_op_type(self): self.data_format = "NCHW" self.op_type = "conv2d_transpose"
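
A minimal usage sketch, not part of the patch itself: because conv2d_transpose
now dispatches through _C_ops under in_dynamic_or_pir_mode(), the functional
API can be built and run inside paddle.pir_utils.IrGuard() and compared against
the dygraph result, mirroring the new _test_pir_equivalence helper above. The
data names ("input", "weight"), tensor shapes, and stride/padding values below
are illustrative assumptions only, not taken from the test suite:

    import numpy as np

    import paddle
    import paddle.nn.functional as F

    # Illustrative NCHW input and an (in_channels, out_channels, kH, kW) filter.
    x_np = np.random.randn(2, 4, 8, 8).astype("float32")
    w_np = np.random.randn(4, 6, 3, 3).astype("float32")

    # Static-graph run under PIR, as _test_pir_equivalence does via IrGuard.
    paddle.enable_static()
    with paddle.pir_utils.IrGuard():
        main = paddle.static.Program()
        start = paddle.static.Program()
        with paddle.static.program_guard(main, start):
            x = paddle.static.data("input", x_np.shape, dtype="float32")
            w = paddle.static.data("weight", w_np.shape, dtype="float32")
            # Under PIR, in_dynamic_or_pir_mode() is true, so this call takes
            # the _C_ops.conv2d_transpose branch enabled by this patch.
            y = F.conv2d_transpose(x, w, stride=2, padding=1)
        exe = paddle.static.Executor(paddle.CPUPlace())
        exe.run(start)
        (pir_out,) = exe.run(
            main, feed={"input": x_np, "weight": w_np}, fetch_list=[y]
        )

    # Dygraph reference, which goes through the same _C_ops kernel.
    paddle.disable_static()
    dy_out = F.conv2d_transpose(
        paddle.to_tensor(x_np), paddle.to_tensor(w_np), stride=2, padding=1
    ).numpy()

    np.testing.assert_array_almost_equal(pir_out, dy_out)

The check_pir=True flags threaded through the op tests above drive a similar
comparison inside OpTest, exercising the operator under PIR in addition to the
legacy static graph; the mkldnn subclass keeps the old check_output path, per
the TODO, since the mkldnn op is not yet covered in dygraph/PIR mode.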