[PIR] Migrate Conv2DTranspose into pir (PaddlePaddle#58416)
0x45f authored Nov 2, 2023
1 parent 1cb50fb commit 38aff43
Showing 6 changed files with 83 additions and 26 deletions.
1 change: 1 addition & 0 deletions paddle/fluid/pir/dialect/op_generator/op_build_gen.py
@@ -22,6 +22,7 @@
     'ReshapeWithXShapeInferMeta',
     'SliceRawInferMeta',
     'StackInferMeta',
+    'Conv2dTransposeInferMeta',
 }

 _PREPARE_DATA_WITH_VECTOR_INT64_MTTABLE_ATTRIBUTE = {'FrobeniusNormOp'}
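A note on this one-line change: op_build_gen.py generates the C++ build functions for PIR ops, and this set singles out InferMeta functions that need non-default handling when that code is emitted. Adding 'Conv2dTransposeInferMeta' lets the generator emit a valid build function for the conv2d_transpose ops this PR migrates; the exact nature of the special handling is not visible in this hunk, so treat this description as an inference from the neighboring entries.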
6 changes: 3 additions & 3 deletions python/paddle/nn/functional/conv.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle import _C_ops, _legacy_C_ops, get_flags, in_dynamic_mode
+from paddle import _C_ops, _legacy_C_ops, get_flags, in_dynamic_mode, pir
 from paddle.base.framework import _global_flags, in_dynamic_or_pir_mode
 from paddle.device import (
     get_all_custom_device_type,
@@ -1241,7 +1241,7 @@ def conv2d_transpose(
             output_size = convert_to_list(output_size, 2, 'output_size')
         elif isinstance(output_size, int):
             output_size = convert_to_list(output_size, 2, 'output_size')
-        elif isinstance(output_size, Variable):
+        elif isinstance(output_size, (Variable, pir.OpResult)):
             check_dtype(
                 output_size.dtype,
                 'output_size',
@@ -1273,7 +1273,7 @@
         op_type = 'depthwise_conv2d_transpose'
         use_cudnn = False

-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         op = (
             _C_ops.conv2d_transpose
             if op_type == 'conv2d_transpose'
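Taken together, the conv.py edits let the functional API build conv2d_transpose into a PIR program: in_dynamic_or_pir_mode() routes PIR static-graph building to the _C_ops path, and a tensor-valued output_size is accepted when it is a pir.OpResult. A minimal sketch of what this enables, modeled on this commit's tests; the shapes and the paddle.assign way of producing a tensor output_size are illustrative assumptions, not taken from the diff:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()
    with paddle.pir_utils.IrGuard():  # build through PIR instead of the legacy IR
        main, start = paddle.static.Program(), paddle.static.Program()
        with paddle.static.program_guard(main, start):
            x = paddle.static.data("x", (2, 4, 8, 8), dtype='float32')
            w = paddle.static.data("w", (4, 6, 3, 3), dtype='float32')
            # Under IrGuard this is a pir.OpResult, which the isinstance check
            # in conv2d_transpose now accepts for output_size (assumed helper).
            out_hw = paddle.assign(np.array([10, 10], dtype='int32'))
            # in_dynamic_or_pir_mode() is True here, so the call dispatches
            # directly to _C_ops.conv2d_transpose.
            y = F.conv2d_transpose(x, w, None, output_size=out_hw)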
1 change: 0 additions & 1 deletion test/legacy_test/test_conv1d_layer.py
@@ -121,7 +121,6 @@ def functional(self, place):
         feed_dict["bias"] = self.bias
         exe = base.Executor(place)
         exe.run(start)
-        # breakpoint()
         (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var])
         return y_np
26 changes: 16 additions & 10 deletions test/legacy_test/test_conv2d_transpose_layer.py
@@ -143,9 +143,12 @@ def functional(self, place):
             w_var = paddle.static.data(
                 "weight", self.weight_shape, dtype=self.dtype
             )
-            b_var = paddle.static.data(
-                "bias", (self.num_filters,), dtype=self.dtype
-            )
+            if not self.no_bias:
+                b_var = paddle.static.data(
+                    "bias", (self.num_filters,), dtype=self.dtype
+                )
+            else:
+                b_var = None

             if self.output_padding != 0:
                 output_size = None
@@ -155,7 +158,7 @@
             y_var = F.conv2d_transpose(
                 x_var,
                 w_var,
-                None if self.no_bias else b_var,
+                b_var,
                 output_size=output_size,
                 padding=self.padding,
                 output_padding=self.output_padding,
@@ -199,8 +202,6 @@ def paddle_nn_layer(self):
         return y_np

     def _test_equivalence(self, place):
-        place = base.CPUPlace()
-
         result1 = self.base_layer(place)
         result2 = self.functional(place)

@@ -210,13 +211,18 @@
         np.testing.assert_array_almost_equal(result1, result2)
         np.testing.assert_array_almost_equal(result2, result3)

+    def _test_pir_equivalence(self, place):
+        with paddle.pir_utils.IrGuard():
+            result1 = self.functional(place)
+        with dg.guard(place):
+            result2 = self.paddle_nn_layer()
+
+        np.testing.assert_array_almost_equal(result1, result2)
+
     def runTest(self):
         place = base.CPUPlace()
         self._test_equivalence(place)
-
-        if base.core.is_compiled_with_cuda():
-            place = base.CUDAPlace(0)
-            self._test_equivalence(place)
+        self._test_pir_equivalence(place)


 class Conv2DTransposeErrorTestCase(Conv2DTransposeTestCase):
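The new _test_pir_equivalence is the PIR counterpart of _test_equivalence: it builds and runs the functional static-graph path under paddle.pir_utils.IrGuard and compares the result against the dygraph layer output. A standalone sketch of the same check (random data, CPU place, and shapes are illustrative assumptions, not taken from the test file):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    x = np.random.randn(2, 4, 8, 8).astype('float32')
    w = np.random.randn(4, 6, 3, 3).astype('float32')

    # Dygraph reference result.
    y_dygraph = F.conv2d_transpose(paddle.to_tensor(x), paddle.to_tensor(w)).numpy()

    # The same computation built and executed as a PIR static program.
    paddle.enable_static()
    with paddle.pir_utils.IrGuard():
        main, start = paddle.static.Program(), paddle.static.Program()
        with paddle.static.program_guard(main, start):
            xv = paddle.static.data("x", x.shape, dtype='float32')
            wv = paddle.static.data("w", w.shape, dtype='float32')
            yv = F.conv2d_transpose(xv, wv)
        exe = paddle.static.Executor(paddle.CPUPlace())
        exe.run(start)
        (y_pir,) = exe.run(main, feed={"x": x, "w": w}, fetch_list=[yv])
    paddle.disable_static()

    np.testing.assert_array_almost_equal(y_dygraph, y_pir)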
62 changes: 50 additions & 12 deletions test/legacy_test/test_conv2d_transpose_op.py
@@ -227,10 +227,15 @@ def test_check_output(self):
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_output_with_place(
-                place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
+                place,
+                atol=1e-5,
+                check_dygraph=(not self.use_mkldnn),
+                check_pir=True,
             )
         else:
-            self.check_output(check_dygraph=(not self.use_mkldnn))
+            self.check_output(
+                check_dygraph=(not self.use_mkldnn), check_pir=True
+            )

     def test_check_grad_no_input(self):
         if self.need_check_grad:
@@ -242,19 +247,28 @@
                     'Output',
                     max_relative_error=0.02,
                     no_grad_set={'Input'},
+                    check_pir=True,
                 )
             else:
-                self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})
+                self.check_grad(
+                    ['Filter'], 'Output', no_grad_set={'Input'}, check_pir=True
+                )

     def test_check_grad_no_filter(self):
         if self.need_check_grad:
             if self.use_cudnn:
                 place = core.CUDAPlace(0)
                 self.check_grad_with_place(
-                    place, ['Input'], 'Output', no_grad_set={'Filter'}
+                    place,
+                    ['Input'],
+                    'Output',
+                    no_grad_set={'Filter'},
+                    check_pir=True,
                 )
             else:
-                self.check_grad(['Input'], 'Output', no_grad_set={'Filter'})
+                self.check_grad(
+                    ['Input'], 'Output', no_grad_set={'Filter'}, check_pir=True
+                )

     def test_check_grad(self):
         if self.need_check_grad:
@@ -265,10 +279,14 @@
                     {'Input', 'Filter'},
                     'Output',
                     max_relative_error=0.02,
+                    check_pir=True,
                 )
             else:
                 self.check_grad(
-                    {'Input', 'Filter'}, 'Output', max_relative_error=0.02
+                    {'Input', 'Filter'},
+                    'Output',
+                    max_relative_error=0.02,
+                    check_pir=True,
                 )

     def init_test_case(self):
@@ -781,10 +799,15 @@ def test_check_output(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
             self.check_output_with_place(
-                place, atol=0.02, check_dygraph=(not self.use_mkldnn)
+                place,
+                atol=0.02,
+                check_dygraph=(not self.use_mkldnn),
+                check_pir=True,
             )
         else:
-            self.check_output(check_dygraph=(not self.use_mkldnn))
+            self.check_output(
+                check_dygraph=(not self.use_mkldnn), check_pir=True
+            )

     def test_check_grad_no_input(self):
         if self.need_check_grad:
@@ -797,9 +820,12 @@ def test_check_grad_no_input(self):
                     'Output',
                     max_relative_error=0.02,
                     no_grad_set={'Input'},
+                    check_pir=True,
                 )
             else:
-                self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})
+                self.check_grad(
+                    ['Filter'], 'Output', no_grad_set={'Input'}, check_pir=True
+                )

     def test_check_grad_no_filter(self):
         if self.need_check_grad:
@@ -812,9 +838,12 @@
                     'Output',
                     max_relative_error=0.02,
                     no_grad_set={'Filter'},
+                    check_pir=True,
                 )
             else:
-                self.check_grad(['Input'], 'Output', no_grad_set={'Filter'})
+                self.check_grad(
+                    ['Input'], 'Output', no_grad_set={'Filter'}, check_pir=True
+                )

     def test_check_grad(self):
         if self.need_check_grad:
@@ -826,10 +855,14 @@
                     {'Input', 'Filter'},
                     'Output',
                     max_relative_error=0.02,
+                    check_pir=True,
                 )
             else:
                 self.check_grad(
-                    {'Input', 'Filter'}, 'Output', max_relative_error=0.02
+                    {'Input', 'Filter'},
+                    'Output',
+                    max_relative_error=0.02,
+                    check_pir=True,
                 )

@@ -965,7 +998,10 @@ def init_op_type(self):
     def test_check_output(self):
         place = core.CUDAPlace(0)
         self.check_output_with_place(
-            place, atol=0.02, check_dygraph=(not self.use_mkldnn)
+            place,
+            atol=0.02,
+            check_dygraph=(not self.use_mkldnn),
+            check_pir=True,
         )

     def test_check_grad_no_input(self):
@@ -978,6 +1014,7 @@ def test_check_grad_no_input(self):
             max_relative_error=0.02,
             no_grad_set={'Input'},
             user_defined_grads=[numeric_grads],
+            check_pir=True,
         )

     def test_check_grad_no_filter(self):
@@ -990,6 +1027,7 @@ def test_check_grad_no_filter(self):
             max_relative_error=0.02,
             no_grad_set={'Filter'},
             user_defined_grads=[numeric_grads],
+            check_pir=True,
         )

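The changes in this file are mechanical: every check_output/check_output_with_place and check_grad/check_grad_with_place call gains check_pir=True. Judging from its use in this commit, the flag tells the OpTest harness to verify the operator under the new PIR execution path in addition to the existing legacy-IR and dygraph checks; the flag's implementation lives in the OpTest base class and is not part of this diff.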
13 changes: 13 additions & 0 deletions test/mkldnn/test_conv2d_transpose_mkldnn_op.py
@@ -19,6 +19,7 @@
 from test_conv2d_transpose_op import TestConv2DTransposeOp

 from paddle import enable_static
+from paddle.base import core


 def conv2d_bias_naive(out, bias):
@@ -39,6 +40,18 @@ def test_check_grad_no_input(self):
     def test_check_grad_no_filter(self):
         return

+    def test_check_output(self):
+        # TODO(wangzhongpu): support mkldnn op in dygraph mode
+        if self.use_cudnn:
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(
+                place,
+                atol=1e-5,
+                check_dygraph=(not self.use_mkldnn),
+            )
+        else:
+            self.check_output(check_dygraph=(not self.use_mkldnn))
+
     def init_op_type(self):
         self.data_format = "NCHW"
         self.op_type = "conv2d_transpose"
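This new override shadows TestConv2DTransposeOp.test_check_output, which now passes check_pir=True (see the previous file), and deliberately omits the flag, so the oneDNN variants keep their pre-PIR behavior; presumably the oneDNN conv2d_transpose kernels are not yet checked under PIR. The core import above exists to support the CUDAPlace branch of this override.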
