From c8daa8566d9a8f4e631eb1b283ae3652c12657a8 Mon Sep 17 00:00:00 2001
From: Wuwei Lin
Date: Tue, 12 Jul 2022 14:06:45 -0700
Subject: [PATCH] Add kernel_layout to topi conv operators; add tests

---
 python/tvm/topi/nn/conv1d.py             | 35 +++++++++++++------
 python/tvm/topi/nn/conv2d.py             | 30 ++++++++++------
 python/tvm/topi/nn/conv3d.py             |  4 +--
 python/tvm/topi/testing/common.py        |  2 +-
 .../topi/python/test_topi_conv2d_nhwc.py | 31 +++++++++++++++-
 5 files changed, 77 insertions(+), 25 deletions(-)

diff --git a/python/tvm/topi/nn/conv1d.py b/python/tvm/topi/nn/conv1d.py
index 560a342d5659f..ee388b4297f46 100644
--- a/python/tvm/topi/nn/conv1d.py
+++ b/python/tvm/topi/nn/conv1d.py
@@ -19,18 +19,27 @@
 from .conv2d import conv


-def conv1d(data, kernel, strides=1, padding="VALID", dilation=1, layout="NCW", out_dtype=None):
+def conv1d(
+    data,
+    kernel,
+    strides=1,
+    padding="VALID",
+    dilation=1,
+    data_layout="NCW",
+    kernel_layout="",
+    out_dtype=None,
+):
     """1D convolution forward operator.

     Parameters
     ----------
     data : tvm.te.Tensor
-        3-D input shape [batch, in_channel, in_width] for layout == 'NCW'
-        and [batch, in_width, in_channel] for layout == 'NWC'
+        3-D input shape [batch, in_channel, in_width] for data_layout == 'NCW'
+        and [batch, in_width, in_channel] for data_layout == 'NWC'

     kernel : tvm.te.Tensor
-        3-D kernel with shape [num_filter, in_channel, filter_size] for layout == 'NCW'
-        and [filter_size, in_channel, num_filter] for layout == 'NWC'
+        3-D kernel with shape [num_filter, in_channel, filter_size] for kernel_layout == 'OIW'
+        and [filter_size, in_channel, num_filter] for kernel_layout == 'WIO'

     strides : int or tuple
         The spatial stride along width
@@ -41,23 +50,27 @@ def conv1d(data, kernel, strides=1, padding="VALID", dilation=1, layout="NCW", o
     dilation : int or tuple
         Dilation rate if convolution should be dilated.

-    layout : str
+    data_layout : str
         How input data is laid out, must be one of ['NCW', 'NWC']

+    kernel_layout : Optional[str]
+        The layout of the kernel. If unspecified, a default layout is inferred from data_layout:
+        "OIW" if data_layout == "NCW", "WIO" if data_layout == "NWC".
+
     out_dtype : str
         The output data type. If None then output is same type as input.
     """
-    return conv(data, kernel, strides, padding, dilation, 1, layout, "", out_dtype)
+    return conv(data, kernel, strides, padding, dilation, 1, data_layout, kernel_layout, out_dtype)


 def conv1d_nwc(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
     """1D convolution in NWC layout. See :py:func:`conv` for details on parameters"""
-    return conv(data, kernel, strides, padding, dilation, 1, "NWC", "", out_dtype=out_dtype)
+    return conv(data, kernel, strides, padding, dilation, 1, "NWC", "WIO", out_dtype=out_dtype)


 def conv1d_ncw(data, kernel, strides=1, padding="VALID", dilation=1, out_dtype=None):
     """1D convolution in NCW layout. See :py:func:`conv` for details on parameters"""
-    return conv(data, kernel, strides, padding, dilation, 1, "NCW", "", out_dtype=out_dtype)
+    return conv(data, kernel, strides, padding, dilation, 1, "NCW", "OIW", out_dtype=out_dtype)


 def group_conv1d_nwc(
@@ -89,7 +102,7 @@ def group_conv1d_nwc(
     out_dtype : str
         The output data type. If None then output is same type as input.
     """
-    return conv(data, kernel, strides, padding, dilation, groups, "NWC", "", out_dtype=out_dtype)
+    return conv(data, kernel, strides, padding, dilation, groups, "NWC", "WIO", out_dtype=out_dtype)


 def group_conv1d_ncw(
@@ -121,4 +134,4 @@ def group_conv1d_ncw(
     out_dtype : str
         The output data type. If None then output is same type as input.
""" - return conv(data, kernel, strides, padding, dilation, groups, "NCW", "", out_dtype=out_dtype) + return conv(data, kernel, strides, padding, dilation, groups, "NCW", "OIW", out_dtype=out_dtype) diff --git a/python/tvm/topi/nn/conv2d.py b/python/tvm/topi/nn/conv2d.py index 32bf3f703f347..b6228c663aa2b 100644 --- a/python/tvm/topi/nn/conv2d.py +++ b/python/tvm/topi/nn/conv2d.py @@ -57,16 +57,18 @@ ) -def conv2d(input, filter, strides, padding, dilation, layout="NCHW", out_dtype=None): +def conv2d( + input, filter, strides, padding, dilation, data_layout="NCHC", kernel_layout="", out_dtype=None +): """Conv2D operator. Parameters ---------- input : tvm.te.Tensor - 4-D with shape [batch, in_channel, in_height, in_width] + 4-D with shape [batch, in_channel, in_height, in_width] in data_layout filter : tvm.te.Tensor - 4-D with shape [num_filter, in_channel, filter_height, filter_width] + 4-D with shape [num_filter, in_channel, filter_height, filter_width] in kernel_layout strides : int or a list/tuple of two ints stride size, or [stride_height, stride_width] @@ -79,9 +81,13 @@ def conv2d(input, filter, strides, padding, dilation, layout="NCHW", out_dtype=N dilation: int or a list/tuple of two ints dilation size, or [dilation_height, dilation_width] - layout : str + data_layout : str layout of data + kernel_layout : Optional[str] + layout of kernel. If unspecified, use default layout inferred from data_layout. "OHWI" if + data_layout == "NCHW", "HWIO" if data_layout == "NHWC". + Returns ------- output : tvm.te.Tensor @@ -89,7 +95,7 @@ def conv2d(input, filter, strides, padding, dilation, layout="NCHW", out_dtype=N """ # search platform specific declaration first # default declaration - return conv(input, filter, strides, padding, dilation, 1, layout, "", out_dtype) + return conv(input, filter, strides, padding, dilation, 1, data_layout, kernel_layout, out_dtype) @tvm.target.generic_func @@ -239,7 +245,7 @@ def conv2d_nchw(Input, Filter, stride, padding, dilation, out_dtype=None): Output : tvm.te.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ - return conv(Input, Filter, stride, padding, dilation, 1, "NCHW", "", out_dtype=out_dtype) + return conv(Input, Filter, stride, padding, dilation, 1, "NCHW", "OIHW", out_dtype=out_dtype) def conv2d_hwcn(Input, Filter, stride, padding, dilation, out_dtype=None): @@ -269,7 +275,7 @@ def conv2d_hwcn(Input, Filter, stride, padding, dilation, out_dtype=None): output : tvm.te.Tensor 4-D with shape [out_height, out_width, out_channel, batch] """ - return conv(Input, Filter, stride, padding, dilation, 1, "HWCN", "", out_dtype=out_dtype) + return conv(Input, Filter, stride, padding, dilation, 1, "HWCN", "HWIO", out_dtype=out_dtype) def conv2d_nhwc( @@ -325,7 +331,7 @@ def conv2d_nhwc( dilation, 1, "NHWC", - "", + "HWIO", out_dtype, auto_scheduler_rewritten_layout, meta_schedule_original_shape, @@ -709,7 +715,9 @@ def group_conv2d_nchw(Input, Filter, stride, padding, dilation, groups, out_dtyp Output : tvm.te.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ - return conv(Input, Filter, stride, padding, dilation, groups, "NCHW", "", out_dtype=out_dtype) + return conv( + Input, Filter, stride, padding, dilation, groups, "NCHW", "OIHW", out_dtype=out_dtype + ) def conv( @@ -943,7 +951,9 @@ def group_conv2d_nhwc(Input, Filter, stride, padding, dilation, groups, out_dtyp Output : tvm.te.Tensor 4-D with shape [batch, out_height, out_width, out_channel] """ - return conv(Input, Filter, stride, padding, dilation, groups, "NHWC", 
"", out_dtype=out_dtype) + return conv( + Input, Filter, stride, padding, dilation, groups, "NHWC", "HWIO", out_dtype=out_dtype + ) def unpack_NCHWc_to_nchw(packed_out, out_dtype): diff --git a/python/tvm/topi/nn/conv3d.py b/python/tvm/topi/nn/conv3d.py index e3e762be47615..1897484dc8cdb 100644 --- a/python/tvm/topi/nn/conv3d.py +++ b/python/tvm/topi/nn/conv3d.py @@ -53,7 +53,7 @@ def conv3d_ncdhw(Input, Filter, stride, padding, dilation, groups, out_dtype=Non Output : tvm.te.Tensor 5-D with shape [batch, out_channel, out_depth, out_height, out_width] """ - return conv(Input, Filter, stride, padding, dilation, groups, "NCDHW", "", out_dtype) + return conv(Input, Filter, stride, padding, dilation, groups, "NCDHW", "OIDHW", out_dtype) def conv3d_ndhwc( @@ -111,7 +111,7 @@ def conv3d_ndhwc( dilation, groups, "NDHWC", - "", + "DHWIO", out_dtype, auto_scheduler_rewritten_layout, meta_schedule_origin_shape, diff --git a/python/tvm/topi/testing/common.py b/python/tvm/topi/testing/common.py index d040310ccc8fb..3fb48796f5257 100644 --- a/python/tvm/topi/testing/common.py +++ b/python/tvm/topi/testing/common.py @@ -21,7 +21,7 @@ import scipy.signal import tvm -from tvm import topi +from tvm import te, topi from tvm.testing import assert_allclose _injective_schedule = { diff --git a/tests/python/topi/python/test_topi_conv2d_nhwc.py b/tests/python/topi/python/test_topi_conv2d_nhwc.py index 362de3a76909a..e60cf12aa83e6 100644 --- a/tests/python/topi/python/test_topi_conv2d_nhwc.py +++ b/tests/python/topi/python/test_topi_conv2d_nhwc.py @@ -77,7 +77,7 @@ def ref_data(dtype, batch, in_channel, in_size, num_filter, kernel, stride, padd return a_np, w_np, b_np -def test_conv2d_nhwc(target, dev, ref_data, dtype, stride, padding, dilation): +def test_conv2d_nhwc_hwio(target, dev, ref_data, dtype, stride, padding, dilation): a_np, w_np, b_np = ref_data A = te.placeholder(a_np.shape, name="A", dtype=dtype) @@ -95,5 +95,34 @@ def test_conv2d_nhwc(target, dev, ref_data, dtype, stride, padding, dilation): tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) +def test_conv2d_nhwc_ohwi(ref_data, dtype, stride, padding, dilation): + # only test on CPU target because topi doesn't have schedules for this layout + target = "llvm" + dev = tvm.device(target, 0) + a_np, w_np_hwio, b_np = ref_data + w_np_ohwi = w_np_hwio.transpose(3, 0, 1, 2) # HWIO -> OHWI + + A = te.placeholder(a_np.shape, name="A", dtype=dtype) + W = te.placeholder(w_np_ohwi.shape, name="W", dtype=dtype) + + B = topi.nn.conv2d( + A, + W, + stride, + padding, + dilation, + data_layout="NHWC", + kernel_layout="OHWI", + out_dtype="float32", + ) + s = tvm.te.create_schedule(B.op) + a = tvm.nd.array(a_np, dev) + w = tvm.nd.array(w_np_ohwi, dev) + b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) + func = tvm.build(s, [A, W, B], target) + func(a, w, b) + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) + + if __name__ == "__main__": tvm.testing.main()