Support SAME padding for dynamic workloads when stride == 1
Trevor Morris committed Apr 19, 2021
1 parent f57830b commit d62f764
Showing 2 changed files with 31 additions and 1 deletion.
6 changes: 5 additions & 1 deletion python/tvm/relay/frontend/tensorflow.py

@@ -56,9 +56,13 @@ def list_shape_of(tensor, ndim):


 def _get_pad_pair(input1d, kernel1d, stride1d):
-    if input1d % stride1d == 0:
+    if stride1d == 1 or input1d % stride1d == 0:
         pad = max(kernel1d - stride1d, 0)
     else:
+        if isinstance(input1d, tvm.tir.Any):
+            raise tvm.error.OpAttributeUnImplemented(
+                "SAME padding is not supported in combination with dynamic height or width when stride is not 1."
+            )
         pad = max(kernel1d - (input1d % stride1d), 0)

     pad_before = pad // 2
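Why the stride == 1 shortcut works: TensorFlow's SAME padding computes the total pad as max(kernel - stride, 0) when the input size divides the stride evenly, and as max(kernel - input % stride, 0) otherwise. With stride 1 the first branch always applies and the result, kernel - 1, does not depend on the input size at all, so it can be computed even when the height or width is a symbolic tvm.tir.Any. A minimal standalone sketch of that arithmetic (same_pad_pair is an illustrative name, not part of the commit):

    def same_pad_pair(input1d, kernel1d, stride1d):
        # Total SAME padding for one spatial dimension, split before/after.
        if stride1d == 1 or input1d % stride1d == 0:
            # With stride 1 this branch is always taken: pad = kernel1d - 1.
            pad = max(kernel1d - stride1d, 0)
        else:
            pad = max(kernel1d - (input1d % stride1d), 0)
        pad_before = pad // 2
        return [pad_before, pad - pad_before]

    # The new test below: a 32x32 input with a 2x2 window and stride 1
    # gives a total pad of 1 per spatial dimension, split as [0, 1].
    assert same_pad_pair(32, 2, 1) == [0, 1]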
26 changes: 26 additions & 0 deletions tests/python/frontend/tensorflow/test_forward.py

@@ -213,6 +213,7 @@ def compare_tf_with_tvm(
     cuda_layout="NCHW",
     add_shapes_to_graph_def=True,
     targets=None,
+    ignore_in_shape=False,
 ):
     """Generic function to generate and compare tensorflow and TVM output"""

@@ -259,6 +260,7 @@ def name_without_num(name):
                 opt_level=opt_level,
                 mode=mode,
                 cuda_layout=cuda_layout,
+                ignore_in_shape=ignore_in_shape,
             )
             # since the names from tensorflow and relay runs are not exactly same,
             # first len(tf_output) will be compared
@@ -313,6 +315,20 @@ def _test_pooling(input_shape, **kwargs):
             kwargs["data_format"] = "NCHW"
             _test_pooling_iteration(input_shape, **kwargs)

+def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
+    """ Pooling with dynamic height and width dimensions. """
+    x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
+
+    with tf.Graph().as_default():
+        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
+        nn_ops.pool(in_data, **kwargs)
+
+        if kwargs["pooling_type"] == "MAX":
+            out_name = "max_pool:0"
+        else:
+            out_name = "avg_pool:0"
+
+        compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)

 @tvm.testing.uses_gpu
 def test_forward_pooling():
@@ -347,6 +363,16 @@ def test_forward_pooling():
             strides=[2, 2, 2],
         )

+        _test_pooling_dynamic(
+            input_shape=[1, None, None, 3],
+            np_shape=[1, 32, 32, 3],
+            window_shape=[2, 2],
+            padding="SAME",
+            pooling_type=pool_type,
+            dilation_rate=[1, 1],
+            strides=[1, 1],
+        )
+
         # test cases for max_pool3d & avg_pool3d with layout NCDHW
         # TensorFlow pool3d doesn't support NCDHW on cpu
         if is_gpu_available():
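For context on the test plumbing: the new ignore_in_shape argument is threaded through to run_tvm_graph, which then imports the graph without an explicit input-shape dict, so the placeholder's None height/width survive as symbolic dimensions (tvm.tir.Any, printed as "?"), and mode="vm" is required because the Relay VM handles dynamic shapes while the graph executor needs static ones. A rough sketch of that path, assuming graph_def is a frozen TensorFlow GraphDef whose "Placeholder" input has shape (1, None, None, 3) and which ends in a node named "max_pool" (graph_def and the node names are illustrative):

    import numpy as np
    import tvm
    from tvm import relay
    from tvm.runtime.vm import VirtualMachine

    # shape=None keeps the GraphDef's dynamic dims; they import as Any.
    mod, params = relay.frontend.from_tensorflow(
        graph_def, shape=None, outputs=["max_pool"]
    )

    with tvm.transform.PassContext(opt_level=3):
        vm_exec = relay.vm.compile(mod, target="llvm", params=params)

    # The VM binds the concrete 32x32 extent at invocation time.
    vm = VirtualMachine(vm_exec, tvm.cpu())
    out = vm.invoke("main", np.zeros((1, 32, 32, 3), dtype="float32"))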
