Miscellaneous fix for several numpy issues (apache#16664)
* fix behavior of np.array when given official numpy ndarray

* bool for expand_dims and cast

* recover original Makefile

* address comments

* add boolean support for cumsum

* add gpu cast boolean support

* add error message
haojin2 authored and yajiedesign committed Nov 6, 2019
1 parent 9ddea08 commit 270346e
Showing 10 changed files with 53 additions and 28 deletions.
21 changes: 13 additions & 8 deletions python/mxnet/numpy/multiarray.py
@@ -577,7 +577,7 @@ def __setitem__(self, key, value):
if not isinstance(key, tuple) or len(key) != 0:
raise IndexError('scalar tensor can only accept `()` as index')
if isinstance(value, numeric_types):
self.full(value)
self._full(value)
elif isinstance(value, ndarray) and value.size == 1:
if value.shape != self.shape:
value = value.reshape(self.shape)
@@ -1993,15 +1993,20 @@ def array(object, dtype=None, ctx=None):
"""
if ctx is None:
ctx = current_context()
if isinstance(object, ndarray):
if isinstance(object, (ndarray, _np.ndarray)):
dtype = object.dtype if dtype is None else dtype
elif isinstance(object, NDArray):
raise ValueError("If you're trying to create a mxnet.numpy.ndarray "
"from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.")
else:
dtype = _np.float32 if dtype is None else dtype
if not isinstance(object, (ndarray, _np.ndarray)):
try:
object = _np.array(object, dtype=dtype)
except Exception as e:
raise TypeError('{}'.format(str(e)))
if dtype is None:
dtype = object.dtype if hasattr(object, "dtype") else _np.float32
try:
object = _np.array(object, dtype=dtype)
except Exception as e:
# printing out the error raised by official NumPy's array function
# for transparency on users' side
raise TypeError('{}'.format(str(e)))
ret = empty(object.shape, dtype=dtype, ctx=ctx)
if len(object.shape) == 0:
ret[()] = object
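A rough sketch of the user-facing behavior this hunk targets (hypothetical session; assumes numpy semantics are enabled via npx.set_np()): an official NumPy ndarray keeps its dtype instead of falling back to float32, a classic mx.nd.NDArray is rejected with a hint to use as_np_ndarray, and invalid input surfaces NumPy's own error text as a TypeError.

import numpy as _np
import mxnet as mx
from mxnet import np, npx
npx.set_np()

a = np.array(_np.arange(6, dtype=_np.int64))   # dtype inferred from the NumPy array
assert a.dtype == _np.int64                    # no longer forced to float32

try:
    np.array(mx.nd.array([1, 2, 3]))           # classic NDArray is rejected...
except ValueError:
    pass                                       # ...with a pointer to the zero-copy path
b = mx.nd.array([1, 2, 3]).as_np_ndarray()     # the suggested conversion instead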
2 changes: 1 addition & 1 deletion src/ndarray/ndarray_function.cu
@@ -76,7 +76,7 @@ void Copy<gpu, gpu>(const TBlob &from, TBlob *to,
from.FlatTo1D<gpu, DType>(s),
s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
to->FlatTo1D<gpu, DType>(s) =
mshadow::expr::tcast<DType>(from.FlatTo1D<gpu, SrcDType>(s));
})
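With the GPU copy kernel now dispatching over a boolean source type, the dtype-converting copy behind the imperative astype path should also handle bool on a CUDA device. A hedged sketch, assuming a CUDA-enabled build with at least one GPU and npx.set_np():

import numpy as _np
import mxnet as mx
from mxnet import np, npx
npx.set_np()

x = np.array([True, False, True], dtype=bool, ctx=mx.gpu(0))
f = x.astype(_np.float32)          # bool -> float32 conversion on the GPU
b = f.astype(_np.bool_)            # and back into bool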
2 changes: 1 addition & 1 deletion src/operator/mxnet_op.h
@@ -671,7 +671,7 @@ template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
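This generic copy helper sits under many operators' identity/copy paths, so widening it to the WITH_BOOL switch is part of what lets boolean tensors flow through ops such as expand_dims. A small sketch mirroring the new test workload (assumes npx.set_np()):

import numpy as _np
from mxnet import np, npx
npx.set_np()

mask = np.random.uniform(size=(4, 1)) > 0.5    # boolean ndarray
out = np.expand_dims(mask, -1)                 # same call as the added workload
assert out.dtype == _np.bool_ and out.shape == (4, 1, 1)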
4 changes: 2 additions & 2 deletions src/operator/numpy/np_cumsum-inl.h
@@ -98,7 +98,7 @@ void CumsumForwardImpl(const OpContext& ctx,
}

Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(in.type_flag_, IType, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(in.type_flag_, IType, {
MSHADOW_TYPE_SWITCH(out.type_flag_, OType, {
Kernel<cumsum_forward, xpu>::Launch(
s, out.Size() / middle, out.dptr<OType>(),
@@ -157,7 +157,7 @@ void CumsumBackwardImpl(const OpContext& ctx,
}
}
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(igrad.type_flag_, IType, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(igrad.type_flag_, IType, {
MSHADOW_TYPE_SWITCH(ograd.type_flag_, OType, {
Kernel<cumsum_backward, xpu>::Launch(
s, igrad.Size() / middle, igrad.dptr<IType>(),
3 changes: 3 additions & 0 deletions src/operator/numpy/np_cumsum.cc
@@ -55,6 +55,9 @@ inline bool CumsumType(const nnvm::NodeAttrs& attrs,
} else {
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
if (out_attrs->at(0) == mshadow::kBool) {
(*out_attrs)[0] = mshadow::kInt64;
}
}

return out_attrs->at(0) != -1 && in_attrs->at(0) != -1;
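The effect of the new kBool branch: when no output dtype is requested, a boolean input is accumulated in int64 rather than in bool, which is what official NumPy does on typical 64-bit platforms. A small sketch (assumes npx.set_np()):

import numpy as _np
from mxnet import np, npx
npx.set_np()

x = np.array([True, False, True, True], dtype=bool)
y = np.cumsum(x)                   # no dtype given -> output promoted to int64
assert y.dtype == _np.int64
assert (y.asnumpy() == _np.array([1, 1, 2, 3])).all()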
2 changes: 1 addition & 1 deletion src/operator/numpy/np_init_op.h
@@ -205,7 +205,7 @@ void IdentityCompute(const nnvm::NodeAttrs& attrs,
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& out_data = outputs[0];
int n = out_data.shape_[0];
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<identity<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), n);
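IdentityCompute backs np.identity, so the WITH_BOOL switch should let the kernel fill a boolean output directly. A hedged sketch, assuming the front end accepts a bool dtype here (npx.set_np() enabled):

from mxnet import np, npx
npx.set_np()

eye = np.identity(3, dtype=bool)   # diagonal True, elsewhere False (assumed to be accepted)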
2 changes: 1 addition & 1 deletion src/operator/tensor/elemwise_unary_op.h
@@ -451,7 +451,7 @@ void CastCompute(const nnvm::NodeAttrs& attrs,
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DstDType, {
Tensor<xpu, 1, DstDType> out = outputs[0].FlatTo1D<xpu, DstDType>(s);
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, SrcDType, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, SrcDType, {
Tensor<xpu, 1, SrcDType> data = inputs[0].FlatTo1D<xpu, SrcDType>(s);
if (outputs[0].type_flag_ != inputs[0].type_flag_ ||
req[0] != kWriteInplace) {
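CastCompute backs the Cast operator used by hybridized astype, so with both source and destination now on WITH_BOOL switches, casts into and out of bool are covered. A minimal sketch (assumes npx.set_np()):

import numpy as _np
from mxnet import np, npx
npx.set_np()

x = np.array([2, 0, 4, 5], dtype=_np.int32)
b = x.astype(_np.bool_)            # int32 -> bool: nonzero becomes True
f = b.astype(_np.float32)          # bool -> float32
assert (b.asnumpy() == _np.array([True, False, True, True])).all()
assert (f.asnumpy() == _np.array([1., 0., 1., 1.], dtype=_np.float32)).all()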
5 changes: 3 additions & 2 deletions tests/python/unittest/test_numpy_interoperability.py
@@ -93,6 +93,7 @@ def _add_workload_copy():

def _add_workload_expand_dims():
OpArgMngr.add_workload('expand_dims', np.random.uniform(size=(4, 1)), -1)
OpArgMngr.add_workload('expand_dims', np.random.uniform(size=(4, 1)) > 0.5, -1)
for axis in range(-5, 4):
OpArgMngr.add_workload('expand_dims', np.empty((2, 3, 4, 5)), axis)

@@ -852,8 +853,8 @@ def _signs(dt):
# test_float_remainder_corner_cases
# Check remainder magnitude.
for ct in _FLOAT_DTYPES:
b = _np.array(1.0)
a = np.array(_np.nextafter(_np.array(0.0), -b), dtype=ct)
b = _np.array(1.0, dtype=ct)
a = np.array(_np.nextafter(_np.array(0.0, dtype=ct), -b), dtype=ct)
b = np.array(b, dtype=ct)
OpArgMngr.add_workload('remainder', a, b)
OpArgMngr.add_workload('remainder', -a, -b)
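Presumably the reason both operands are now built in ct: taking nextafter from 0.0 in float64 and only then casting to a narrower dtype underflows the tiny step away, so the corner case being exercised disappears; computing the step in ct itself keeps the smallest representable magnitude. A quick illustration with plain NumPy and float16:

import numpy as _np

# step below 0.0 taken in float64, then narrowed: underflows to -0.0
print(_np.float16(_np.nextafter(_np.array(0.0), -_np.array(1.0))))           # -0.0
# step taken directly in float16: smallest float16 subnormal survives
print(_np.nextafter(_np.array(0.0, dtype=_np.float16), _np.float16(-1.0)))   # ~-6e-08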
28 changes: 17 additions & 11 deletions tests/python/unittest/test_numpy_ndarray.py
@@ -18,6 +18,7 @@
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
import itertools
import os
import unittest
import numpy as _np
@@ -87,17 +88,20 @@ def test_np_array_creation():
[],
(),
[[1, 2], [3, 4]],
_np.random.randint(-10, 10, size=rand_shape_nd(3)),
_np.random.uniform(size=rand_shape_nd(3)),
_np.random.uniform(size=(3, 0, 4))
]
for dtype in dtypes:
for src in objects:
mx_arr = np.array(src, dtype=dtype)
assert mx_arr.ctx == mx.current_context()
if dtype is None:
dtype = src.dtype if isinstance(src, _np.ndarray) else _np.float32
if isinstance(src, mx.nd.NDArray):
np_arr = _np.array(src.asnumpy(), dtype=dtype if dtype is not None else _np.float32)
np_arr = _np.array(src.asnumpy(), dtype=dtype)
else:
np_arr = _np.array(src, dtype=dtype if dtype is not None else _np.float32)
np_arr = _np.array(src, dtype=dtype)
assert mx_arr.dtype == np_arr.dtype
assert same(mx_arr.asnumpy(), np_arr)

@@ -471,9 +475,6 @@ def test_np_grad_ndarray_type():
@with_seed()
@use_np
def test_np_ndarray_astype():
mx_data = np.array([2, 3, 4, 5], dtype=_np.int32)
np_data = mx_data.asnumpy()

class TestAstype(HybridBlock):
def __init__(self, dtype, copy):
super(TestAstype, self).__init__()
@@ -483,24 +484,29 @@ def __init__(self, dtype, copy):
def hybrid_forward(self, F, x):
return x.astype(dtype=self._dtype, copy=self._copy)

def check_astype_equal(dtype, copy, expect_zero_copy=False, hybridize=False):
test_astype = TestAstype(dtype, copy)
def check_astype_equal(itype, otype, copy, expect_zero_copy=False, hybridize=False):
expect_zero_copy = copy is False and itype == otype
mx_data = np.array([2, 3, 4, 5], dtype=itype)
np_data = mx_data.asnumpy()
test_astype = TestAstype(otype, copy)
if hybridize:
test_astype.hybridize()
mx_ret = test_astype(mx_data)
assert type(mx_ret) is np.ndarray
np_ret = np_data.astype(dtype=dtype, copy=copy)
np_ret = np_data.astype(dtype=otype, copy=copy)
assert mx_ret.dtype == np_ret.dtype
assert same(mx_ret.asnumpy(), np_ret)
if expect_zero_copy and not hybridize:
assert id(mx_ret) == id(mx_data)
assert id(np_ret) == id(np_data)

for dtype in [np.int8, np.uint8, np.int32, np.float16, np.float32, np.float64, np.bool, np.bool_,
'int8', 'uint8', 'int32', 'float16', 'float32', 'float64', 'bool']:
dtypes = [np.int8, np.uint8, np.int32, np.float16, np.float32, np.float64, np.bool, np.bool_,
'int8', 'uint8', 'int32', 'float16', 'float32', 'float64', 'bool']

for itype, otype in itertools.product(dtypes, dtypes):
for copy in [True, False]:
for hybridize in [True, False]:
check_astype_equal(dtype, copy, copy is False and mx_data.dtype == dtype, hybridize)
check_astype_equal(itype, otype, copy, hybridize)


@with_seed()
12 changes: 11 additions & 1 deletion tests/python/unittest/test_numpy_op.py
@@ -442,7 +442,7 @@ def is_int(dtype):
for axis in ([i for i in range(in_data_dim)] + [(), None]):
for itype in ['float16', 'float32', 'float64', 'int8', 'int32', 'int64', 'bool']:
for dtype in ['float16', 'float32', 'float64', 'int8', 'int32', 'int64']:
if (is_int(dtype) and not is_int(itype))\
if (is_int(dtype) and not is_int(itype)) or (is_windows and is_int(itype))\
or (itype == 'bool' and\
(dtype not in ('float32', 'float64', 'int32', 'int64') or is_windows)):
continue
@@ -2390,6 +2390,16 @@ def hybrid_forward(self, F, a):
np_out = _np.cumsum(x.asnumpy(), axis=axis, dtype=otype)
assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)

for shape in shapes:
for axis in [None] + [i for i in range(0, len(shape))]:
for otype in [None, _np.int32, _np.int64]:
for itype in [_np.bool, _np.int8, _np.int32, _np.int64]:
x = rand_ndarray(shape).astype(itype).as_np_ndarray()
np_out = _np.cumsum(x.asnumpy(), axis=axis, dtype=otype)
mx_out = np.cumsum(x, axis=axis, dtype=otype)
assert mx_out.shape == np_out.shape
assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)


@with_seed()
@use_np
