This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Pad Operator Type Support #12035

Merged 7 commits on Aug 20, 2018
11 changes: 11 additions & 0 deletions src/operator/pad-inl.h
@@ -189,6 +189,17 @@ class PadProp : public OperatorProperty {
     return param_.__DICT__();
   }

+  bool InferType(std::vector<int> *in_type,
+                 std::vector<int> *out_type,
+                 std::vector<int> *aux_type) const override {
+    int dtype = (*in_type)[0];
+    type_assign(&dtype, (*out_type)[0]);
+
+    TYPE_ASSIGN_CHECK(*in_type, 0, dtype);
+    TYPE_ASSIGN_CHECK(*out_type, 0, dtype);
+    return dtype != -1;
+  }
+
   bool InferShape(std::vector<TShape> *in_shape, std::vector<TShape> *out_shape,
                   std::vector<TShape> *aux_shape) const override {
     using namespace mshadow;
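The effect of the new InferType is that Pad's output dtype now follows its input dtype rather than defaulting to float32. A minimal sketch of checking this from Python (illustrative only, not part of the PR):

import numpy as np
import mxnet as mx

# Declare the input with a non-default dtype and let type inference run.
x = mx.symbol.Variable('x', dtype=np.float16)
y = mx.symbol.Pad(data=x, mode='constant', pad_width=(0, 0, 0, 0, 1, 1, 1, 1))

arg_types, out_types, _ = y.infer_type()
print(arg_types, out_types)  # expected: [numpy.float16] [numpy.float16]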
24 changes: 14 additions & 10 deletions tests/python/unittest/test_operator.py
@@ -2905,16 +2905,16 @@ def test_roipooling():
                        numeric_eps=1e-4, rtol=1e-1, atol=1E-4)


-def check_pad_with_shape(shape, xpu, pad_width, mode):
+def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
     # bind with label
-    X = mx.symbol.Variable('X')
+    X = mx.symbol.Variable('X', dtype=dtype)
     Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
-    x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu()).copyto(xpu)
+    x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
     # numpy result
     pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
     np_out = np.pad(x.asnumpy(), pad_grouped, mode)
     # mxnet result
-    grad = mx.nd.empty(shape, ctx = xpu)
+    grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
     exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
     exec1.forward(is_train=True)
     out = exec1.outputs[0].asnumpy()
@@ -2926,16 +2926,20 @@ def check_pad_with_shape(shape, xpu, pad_width, mode):

 @with_seed()
 def test_pad():
+    ctx = default_context()
     shape1 = (2, 3, 3, 5)
     pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
     shape2 = (2, 3, 3, 5, 4)
     pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
-    check_pad_with_shape(shape1, default_context(), pad1, 'constant')
-    check_pad_with_shape(shape1, default_context(), pad1, 'edge')
-    check_pad_with_shape(shape2, default_context(), pad2, 'constant')
-    check_pad_with_shape(shape2, default_context(), pad2, 'edge')
-    check_pad_with_shape(shape1, default_context(), pad1, 'reflect')
-    check_pad_with_shape(shape2, default_context(), pad2, 'reflect')
+    # note: this op doesn't support ints yet; add tests when supported
+    dtypes = ["float16", "float32", "float64"]
Contributor:
should be

dtypes = [np.float16, np.float32, np.float64]

?
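(Aside, not from the PR thread: mx.symbol.Variable accepts either a numpy type object or its string name, so both spellings should resolve to the same inferred dtype. A quick check, assuming the usual imports:

import numpy as np
import mxnet as mx

a = mx.symbol.Variable('a', dtype='float16')
b = mx.symbol.Variable('b', dtype=np.float16)
assert a.infer_type()[1] == b.infer_type()[1]  # both infer numpy.float16
)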

+    for dtype in dtypes:
+        check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
+        check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
+        check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
+        check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
+        check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
+        check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)


def np_instance_norm(data, weight, bias, eps):
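For completeness, a sketch of exercising the new dtype support imperatively; mx.nd.pad is assumed here to mirror mx.symbol.Pad:

import mxnet as mx

# 4-D float16 input; pad_width pads only the two spatial axes.
x = mx.nd.ones((1, 1, 2, 2), dtype='float16')
y = mx.nd.pad(x, mode='constant', constant_value=0,
              pad_width=(0, 0, 0, 0, 1, 1, 1, 1))
print(y.dtype)   # expected: numpy.float16, matching the input
print(y.shape)   # (1, 1, 4, 4)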