Commit

Change support level to 10
kevinthesun committed Apr 29, 2019
1 parent fea9037 commit c357607
Showing 4 changed files with 47 additions and 48 deletions.
4 changes: 2 additions & 2 deletions python/tvm/relay/op/nn/nn.py
@@ -393,7 +393,7 @@ def global_avg_pool2d(data,
def adaptive_max_pool2d(data,
output_size=None,
layout="NCHW"):
r"""2D adaptive max pooling operator.
r"""2D adaptive max pooling operator. This operator is experimental.
This operator takes data as input and does 2D max value calculation
across each window represented by WxH.
@@ -439,7 +439,7 @@ def adaptive_max_pool2d(data,
def adaptive_avg_pool2d(data,
output_size=None,
layout="NCHW"):
r"""2D adaptive average pooling operator.
r"""2D adaptive average pooling operator. This operator is experimental.
This operator takes data as input and does 2D average value calculation
across each window represented by WxH.
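For readers who just want to see these operators in action, here is a minimal sketch of driving one of them through Relay's graph executor. It mirrors the test added to test_op_level10.py below (relay.var, relay.Function, relay.create_executor with the 2019-era ctx/target arguments); the concrete shapes and the llvm/CPU target are illustrative choices, not part of the commit.

import numpy as np
import tvm
from tvm import relay

# Adaptively average-pool an NCHW tensor down to a fixed 7x7 spatial size.
x = relay.var("x", relay.TensorType((1, 3, 224, 224), "float32"))
y = relay.nn.adaptive_avg_pool2d(x, output_size=(7, 7), layout="NCHW")
func = relay.Function([x], y)

# Execute on CPU with the graph runtime, the same way the new test does.
data = np.random.uniform(size=(1, 3, 224, 224)).astype("float32")
intrp = relay.create_executor("graph", ctx=tvm.cpu(0), target="llvm")
out = intrp.evaluate(func)(data)
print(out.asnumpy().shape)  # (1, 3, 7, 7)

Swapping in relay.nn.adaptive_max_pool2d works the same way, since both operators share the output_size/layout signature shown above.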
4 changes: 2 additions & 2 deletions src/relay/op/nn/pooling.cc
@@ -511,7 +511,7 @@ RELAY_REGISTER_OP("nn.adaptive_avg_pool2d")
.set_attrs_type_key("relay.attrs.AdaptivePool2DAttrs")
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
- .set_support_level(2)
+ .set_support_level(10)
.add_type_rel("AdaptiveAvgPool2D", AdaptivePool2DRel)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
Pool2DInferCorrectLayout<AdaptivePool2DAttrs>)
@@ -550,7 +550,7 @@ RELAY_REGISTER_OP("nn.adaptive_max_pool2d")
.set_attrs_type_key("relay.attrs.AdaptivePool2DAttrs")
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
- .set_support_level(2)
+ .set_support_level(10)
.add_type_rel("AdaptiveMaxPool2D", AdaptivePool2DRel)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
Pool2DInferCorrectLayout<AdaptivePool2DAttrs>)
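A note on the change itself: Relay tags every registered operator with a numeric support level, and this commit moves both adaptive pooling registrations from level 2 (the standard pooling/convolution tier) to level 10, the tier Relay's documentation uses for temporary/experimental operators; that reclassification is also why the docstrings above now say the operators are experimental and why their tests move from test_op_level2.py to test_op_level10.py below. To read the level back from Python, something along these lines should work; relay.op.get exists in this TVM version, but treating support_level as a directly readable field of the Op node is an assumption on my part, not something exercised in this commit.

from tvm import relay

# Look up the registered operators and report their support level.
# (Reading `support_level` as a node field is an assumption, see note above.)
for name in ["nn.adaptive_avg_pool2d", "nn.adaptive_max_pool2d"]:
    op = relay.op.get(name)
    print(name, op.support_level)  # expected to print 10 after this commit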
43 changes: 43 additions & 0 deletions tests/python/relay/test_op_level10.py
@@ -208,7 +208,50 @@ def test_shape_of():
tvm.testing.assert_allclose(op_res.asnumpy(),
np.array(shape).astype('int32'))

def verify_adaptive_pool2d(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
def start_index(index, odim, idim):
return int(np.floor(index * idim / odim))

def end_index(index, odim, idim):
return int(np.ceil((index + 1) * idim / odim))

np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
n, c, h, w = dshape
oh, ow = out_size
oshape = (n, c) + out_size
np_out = np.zeros(oshape).astype(dtype)
np_op = np.mean if pool_type == "avg" else np.max
for i in range(n):
for j in range(c):
for k in range(oh):
k_start = start_index(k, oh, h)
k_end = end_index(k, oh, h)
k_sl = slice(k_start, k_end)
for l in range(ow):
l_start = start_index(l, ow, w)
l_end = end_index(l, ow, w)
l_sl = slice(l_start, l_end)
np_out[i, j, k, l] = np_op(np_data[i, j, k_sl, l_sl])

opfunc = relay.nn.adaptive_avg_pool2d if pool_type == "avg" else relay.nn.adaptive_max_pool2d
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, out_size, layout)
func = relay.Function([x], y)

for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
relay_out = intrp1.evaluate(func)(np_data)
tvm.testing.assert_allclose(relay_out.asnumpy(), np_out, rtol=1e-5, atol=1e-5)

def test_adaptive_pool2d():
verify_adaptive_pool2d((1, 9, 224, 224), (1, 1), "max")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg")
verify_adaptive_pool2d((1, 14, 56, 78), (34, 13), "max")
verify_adaptive_pool2d((1, 5, 46, 97), (4, 96), "avg")


if __name__ == "__main__":
test_adaptive_pool2d()
test_collapse_sum_like()
test_broadcast_to_like()
test_slice_like()
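The start_index/end_index helpers in the new test implement the usual adaptive-pooling window arithmetic: output bin k over an input extent idim covers the half-open range [floor(k * idim / odim), ceil((k + 1) * idim / odim)). As a quick standalone illustration (worked out here for one of the test shapes, not part of the commit), mapping an input width of 224 onto 3 output bins gives slightly overlapping windows:

import numpy as np

def start_index(index, odim, idim):
    return int(np.floor(index * idim / odim))

def end_index(index, odim, idim):
    return int(np.ceil((index + 1) * idim / odim))

# Windows for w = 224, ow = 3, matching the (1, 3, 224, 224) -> (2, 3) "avg" case above.
for l in range(3):
    print(l, start_index(l, 3, 224), end_index(l, 3, 224))
# 0 0 75
# 1 74 150
# 2 149 224   (adjacent windows share one column, which np.mean/np.max handle naturally)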
44 changes: 0 additions & 44 deletions tests/python/relay/test_op_level2.py
@@ -293,49 +293,6 @@ def test_avg_pool2d_no_count_pad():
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)


def verify_adaptive_pool2d(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
def start_index(index, odim, idim):
return int(np.floor(index * idim / odim))

def end_index(index, odim, idim):
return int(np.ceil((index + 1) * idim / odim))

np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
n, c, h, w = dshape
oh, ow = out_size
oshape = (n, c) + out_size
np_out = np.zeros(oshape).astype(dtype)
np_op = np.mean if pool_type == "avg" else np.max
for i in range(n):
for j in range(c):
for k in range(oh):
k_start = start_index(k, oh, h)
k_end = end_index(k, oh, h)
k_sl = slice(k_start, k_end)
for l in range(ow):
l_start = start_index(l, ow, w)
l_end = end_index(l, ow, w)
l_sl = slice(l_start, l_end)
np_out[i, j, k, l] = np_op(np_data[i, j, k_sl, l_sl])

opfunc = relay.nn.adaptive_avg_pool2d if pool_type == "avg" else relay.nn.adaptive_max_pool2d
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, out_size, layout)
func = relay.Function([x], y)

for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
relay_out = intrp1.evaluate(func)(np_data)
tvm.testing.assert_allclose(relay_out.asnumpy(), np_out, rtol=1e-5, atol=1e-5)

def test_adaptive_pool2d():
verify_adaptive_pool2d((1, 9, 224, 224), (1, 1), "max")
verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg")
verify_adaptive_pool2d((1, 14, 56, 78), (34, 13), "max")
verify_adaptive_pool2d((1, 5, 46, 97), (4, 96), "avg")


def test_flatten_infer_type():
d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
@@ -524,7 +481,6 @@ def test_upsampling():
if __name__ == "__main__":
test_pool2d()
test_avg_pool2d_no_count_pad()
test_adaptive_pool2d()
test_lrn()
test_l2_normalize()
test_conv2d_infer_type()
