diff --git a/python/mxnet/gluon/block.py b/python/mxnet/gluon/block.py
index 807f160baa56..136289136d61 100644
--- a/python/mxnet/gluon/block.py
+++ b/python/mxnet/gluon/block.py
@@ -551,7 +551,7 @@ def __call__(self, *args):
         for hook in self._forward_hooks.values():
             hook(self, args, out)
 
-        if _mx_np.is_np_compat():
+        if _mx_np.is_np_shape():
             _check_all_np_ndarrays(_flatten(out, "output")[0])
         return out
 
diff --git a/python/mxnet/gluon/parameter.py b/python/mxnet/gluon/parameter.py
index 307fb15bd1b7..2d3e8c05462f 100644
--- a/python/mxnet/gluon/parameter.py
+++ b/python/mxnet/gluon/parameter.py
@@ -31,7 +31,7 @@
 from ..context import Context, cpu
 from .. import autograd
 from .utils import _indent, _brief_print_list, shape_is_known
-from .. import is_np_shape
+from ..util import is_np_shape
 
 # pylint: disable= invalid-name
 tensor_types = (symbol.Symbol, ndarray.NDArray)
@@ -188,7 +188,7 @@ def shape(self, new_shape):
         if self._shape is None:
             self._shape = new_shape
             return
-        unknown_dim_size = -1 if is_np_compat() else 0
+        unknown_dim_size = -1 if is_np_shape() else 0
         assert len(self._shape) == len(new_shape) and \
             all(j in (unknown_dim_size, i) for i, j in zip(new_shape, self._shape)), \
             "Expected shape %s is incompatible with given shape %s."%(
@@ -330,7 +330,7 @@ def _finish_deferred_init(self):
                 initializer.create(default_init)(
                     initializer.InitDesc(self.name, {'__init__': init}), data)
                 # TODO(junwu): use np random operators when available
-                if is_np_compat():
+                if is_np_shape():
                     data = data.as_np_ndarray()  # convert to np.ndarray
 
             self._init_impl(data, ctx)
@@ -357,7 +357,7 @@ def _init_grad(self):
         self._grad = [ndarray.zeros(shape=i.shape, dtype=i.dtype, ctx=i.context,
                                     stype=self._grad_stype) for i in self._data]
         # TODO(junwu): use np.zeros
-        if is_np_compat():
+        if is_np_shape():
             self._grad = [arr.as_np_ndarray() for arr in self._grad]
 
         autograd.mark_variables(self._check_and_get(self._data, list),
@@ -606,7 +606,7 @@ def var(self):
             self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
                                    lr_mult=self.lr_mult, wd_mult=self.wd_mult,
                                    init=self.init, stype=self._stype)
-            if is_np_compat():
+            if is_np_shape():
                 self._var = self._var.as_np_ndarray()
         return self._var
diff --git a/python/mxnet/gluon/utils.py b/python/mxnet/gluon/utils.py
index acfcce2ae3de..b21e06dbeabf 100644
--- a/python/mxnet/gluon/utils.py
+++ b/python/mxnet/gluon/utils.py
@@ -40,6 +40,7 @@ class requests_failed_to_import(object):
 from .. import ndarray
 from ..util import is_np_shape
 
+
 def split_data(data, num_slice, batch_axis=0, even_split=True):
     """Splits an NDArray into `num_slice` slices along `batch_axis`.
     Usually used for data parallelism where each slices is sent
diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py
index 725fba4c1cf1..72b890d6016b 100644
--- a/python/mxnet/ndarray/numpy/_op.py
+++ b/python/mxnet/ndarray/numpy/_op.py
@@ -20,7 +20,7 @@
 from __future__ import absolute_import
 import numpy as _np
 from ...base import numeric_types
-from ...util import _sanity_check_params, use_np_compat, set_module
+from ...util import _sanity_check_params, set_module
 from ...context import current_context
 from . import _internal as _npi
 
@@ -90,7 +90,6 @@ def ones(shape, dtype=None, **kwargs):
 
 
 #pylint: disable= too-many-arguments, no-member, protected-access
-@use_np_compat
 def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
     """ Helper function for element-wise operation.
     The function will perform numpy-like broadcasting if needed and call different functions.
diff --git a/python/mxnet/ndarray/register.py b/python/mxnet/ndarray/register.py
index e93a74c5bf17..c2225bb4fde6 100644
--- a/python/mxnet/ndarray/register.py
+++ b/python/mxnet/ndarray/register.py
@@ -25,7 +25,7 @@
 from ..ndarray_doc import _build_doc
 
 from ..base import mx_uint, check_call, _LIB, py_str, _init_op_module, _Null, _is_np_op  # pylint: disable=unused-import
-from ..util import use_np_compat  # pylint: disable=unused-import
+from ..util import use_np_shape  # pylint: disable=unused-import
 
 
 def _verify_all_np_ndarrays(op_name, func_name, args, out):
@@ -176,7 +176,7 @@ def _generate_ndarray_function_code(handle, op_name, func_name, signature_only=F
     if is_np_op:
         doc_str_idx = 2
         code.append("""
-@use_np_compat""")
+@use_np_shape""")
     if arr_name:
         code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
diff --git a/python/mxnet/numpy/__init__.py b/python/mxnet/numpy/__init__.py
index 6d6ac6ad465c..6f1c02d6462b 100644
--- a/python/mxnet/numpy/__init__.py
+++ b/python/mxnet/numpy/__init__.py
@@ -26,6 +26,6 @@
 from . import _op
 from . import _register
 from ._op import *  # pylint: disable=wildcard-import
-from ..util import use_np_compat, set_np_compat, np_compat, is_np_compat
+from ..util import use_np_shape, set_np_shape, np_shape, is_np_shape
 
 __all__ = []
diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py
index f5a3b83ba485..e9afd2392e5a 100644
--- a/python/mxnet/numpy/multiarray.py
+++ b/python/mxnet/numpy/multiarray.py
@@ -30,7 +30,7 @@
 from . import _op as _mx_np_op
 from ..base import check_call, _LIB, NDArrayHandle
 from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types
-from ..util import _sanity_check_params, set_module, use_np_compat
+from ..util import _sanity_check_params, set_module, use_np_shape
 from ..context import current_context
 from ..ndarray import numpy as _mx_nd_np
 from ..ndarray.numpy import _internal as _npi
@@ -75,7 +75,7 @@ def _np_ndarray_cls(handle, writable=True, stype=0):
 
 
 @set_module('mxnet.numpy')  # pylint: disable=invalid-name
-@use_np_compat
+@use_np_shape
 class ndarray(NDArray):
     """An array object represents a multidimensional, homogeneous array of fixed-size items.
     An associated data-type object describes the format of each element in the array
@@ -1140,7 +1140,6 @@ def ndim(self):
         return len(self.shape)
 
     @property
-    @use_np_compat
     def size(self):
         """Number of elements in the array."""
         return super(ndarray, self).size
@@ -1150,7 +1149,6 @@ def tostype(self, stype):
 
 
 @set_module('mxnet.numpy')
-@use_np_compat
 def empty(shape, dtype=None, **kwargs):
     """Return a new array of given shape and type, without initializing entries.
 
@@ -1183,7 +1181,7 @@ def empty(shape, dtype=None, **kwargs):
 
 
 @set_module('mxnet.numpy')
-@use_np_compat
+@use_np_shape
 def array(object, dtype=None, **kwargs):
     """
     Create an array.
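
Reviewer note: the gluon/parameter.py hunk above is the semantic core of this rename. With NumPy shape semantics enabled, -1 marks an unknown dimension and 0 becomes a legal zero-size dimension, hence `unknown_dim_size = -1 if is_np_shape() else 0`. A minimal sketch of the switch as exercised here (names taken from the hunks above; assumes this branch's build and is untested against this exact commit):

    import mxnet as mx
    from mxnet.util import set_np_shape, is_np_shape

    set_np_shape(True)   # enable NumPy shape semantics process-wide
    assert is_np_shape()
    # -1 now means "unknown"; 0 is a real zero-size dimension
    w = mx.gluon.Parameter('w', shape=(-1, 20))  # first dim deferred until data is seen
    set_np_shape(False)  # back to legacy semantics, where 0 means "unknown"
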
diff --git a/tests/python/unittest/test_numpy_gluon.py b/tests/python/unittest/test_numpy_gluon.py
index 446f5b8c9672..b7656b75feb7 100644
--- a/tests/python/unittest/test_numpy_gluon.py
+++ b/tests/python/unittest/test_numpy_gluon.py
@@ -44,7 +44,7 @@ def __init__(self):
         def hybrid_forward(self, F, x, w):
             return F.dot(x, w)
 
-    @np.use_np_compat
+    @np.use_np_shape
     class TestBlock2(gluon.HybridBlock):
         def __init__(self):
             super(TestBlock2, self).__init__()
@@ -62,7 +62,7 @@ def hybrid_forward(self, F, x, w):
 
 
 def test_optimizer_with_np_ndarrays():
-    @np.use_np_compat
+    @np.use_np_shape
     class LinearRegression(gluon.HybridBlock):
         def __init__(self, num_input_dim=-1, num_hidden_dim=100, num_output_dim=10):
             super(LinearRegression, self).__init__()
@@ -78,7 +78,7 @@ def hybrid_forward(self, F, x, w1, w2):
             y_pred = h_relu.dot(w2)  # equivalent to F.np.dot(h_relu, w2)
             return y_pred
 
-    @np.use_np_compat
+    @np.use_np_shape
     class TotalLoss(gluon.HybridBlock):
         def hybrid_forward(self, F, pred, label):
             return ((pred - label) ** 2).sum()  # equivalent to F.np.sum(F.np.square(pred - label))
diff --git a/tests/python/unittest/test_numpy_ndarray.py b/tests/python/unittest/test_numpy_ndarray.py
index 7ffa77438e64..188cb6f3393a 100644
--- a/tests/python/unittest/test_numpy_ndarray.py
+++ b/tests/python/unittest/test_numpy_ndarray.py
@@ -47,7 +47,7 @@ def test_array_creation():
 @with_seed()
 def test_zeros():
     # test np.zeros in Gluon
-    @np.use_np_compat
+    @np.use_np_shape
     class TestZeros(HybridBlock):
         def __init__(self, shape, dtype=None):
             super(TestZeros, self).__init__()
@@ -57,13 +57,13 @@ def hybrid_forward(self, F, x, *args, **kwargs):
             return x + F.np.zeros(shape, dtype)
 
-    @np.use_np_compat
+    @np.use_np_shape
     class TestZerosOutputType(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return x, F.np.zeros(shape=())
 
     # test np.zeros in imperative
-    @np.use_np_compat
+    @np.use_np_shape
     def check_zero_array_creation(shape, dtype):
         np_out = _np.zeros(shape=shape, dtype=dtype)
         mx_out = np.zeros(shape=shape, dtype=dtype)
@@ -97,7 +97,7 @@ def check_zero_array_creation(shape, dtype):
 @with_seed()
 def test_ones():
     # test np.ones in Gluon
-    @np.use_np_compat
+    @np.use_np_shape
     class TestOnes(HybridBlock):
         def __init__(self, shape, dtype=None):
             super(TestOnes, self).__init__()
@@ -107,13 +107,13 @@ def hybrid_forward(self, F, x, *args, **kwargs):
             return x * F.np.ones(shape, dtype)
 
-    @np.use_np_compat
+    @np.use_np_shape
     class TestOnesOutputType(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return x, F.np.ones(shape=())
 
     # test np.ones in imperative
-    @np.use_np_compat
+    @np.use_np_shape
     def check_ones_array_creation(shape, dtype):
         np_out = _np.ones(shape=shape, dtype=dtype)
         mx_out = np.ones(shape=shape, dtype=dtype)
@@ -156,7 +156,7 @@ def test_ndarray_binary_element_wise_ops():
     def get_np_ret(x1, x2, op):
         return np_op_map[op](x1, x2)
 
-    @np.use_np_compat
+    @np.use_np_shape
    class TestBinaryElementWiseOp(HybridBlock):
         def __init__(self, op, scalar=None, reverse=False):
             super(TestBinaryElementWiseOp, self).__init__()
@@ -219,7 +219,7 @@ def hybrid_forward(self, F, x, *args):
                 print(self._op)
                 assert False
 
-    @np.use_np_compat
+    @np.use_np_shape
     def check_binary_op_result(shape1, shape2, op, dtype=None):
         if shape1 is None:
             mx_input1 = abs(_np.random.uniform()) + 1
@@ -289,7 +289,7 @@ def check_binary_op_result(shape1, shape2, op, dtype=None):
 
 @with_seed()
 def test_hybrid_block_multiple_outputs():
-    @np.use_np_compat
+    @np.use_np_shape
     class TestAllNumpyOutputs(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return F.npe.relu(x), F.np.sum(x)
@@ -309,7 +309,7 @@ def hybrid_forward(self, F, x, *args, **kwargs):
         assert type(out1) is expected_out_type
         assert type(out2) is expected_out_type
 
-    @np.use_np_compat
+    @np.use_np_shape
     class TestMixedTypeOutputsFailure(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return F.relu(x.as_classic_ndarray()), F.np.sum(x)
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index e1993923ebf5..e43b91ff6a64 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -88,7 +88,7 @@ def is_int(dtype):
 
 
 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_dot():
     shapes = [
         ((3, 0), (0, 4)),
@@ -132,7 +132,7 @@ def test_np_dot():
 
 @with_seed()
 def test_np_mean():
-    @np.use_np_compat
+    @np.use_np_shape
     class TestMean(HybridBlock):
         def __init__(self, axis=None, dtype=None, keepdims=False):
             super(TestMean, self).__init__()
@@ -194,7 +194,7 @@ def is_int(dtype):
 
 
 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_transpose():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('a').as_np_ndarray()
@@ -224,7 +224,7 @@ def test_np_transpose():
 
 
 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_relu():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('data').as_np_ndarray()
@@ -240,7 +240,7 @@ def test_relu():
 
 
 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_sigmoid():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('data').as_np_ndarray()
@@ -256,7 +256,7 @@ def test_sigmoid():
 
 
 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_reshape():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('a').as_np_ndarray()
@@ -272,7 +272,7 @@ def test_np_reshape():
 
 
 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_maximum():
     # TODO(junwu): Add more test cases
     x1, x2 = mx.sym.var('x1').as_np_ndarray(), mx.sym.var('x2').as_np_ndarray()
@@ -293,7 +293,7 @@ def check_maximum(x1, x2):
 
 
 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_minimum():
     # TODO(junwu): Add more test cases
     x1, x2 = mx.sym.var('x1').as_np_ndarray(), mx.sym.var('x2').as_np_ndarray()
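
Reviewer note: the test-side change is mechanical — `np.use_np_compat` becomes `np.use_np_shape`, applied as a function or class decorator, with `np_shape` also re-exported from mxnet.numpy as a context manager (see the numpy/__init__.py hunk). A hedged sketch of the decorated-block pattern the tests rely on; it assumes `np` is this branch's `mxnet.numpy` module, and `ZeroSizeBlock` is a hypothetical name, not part of this PR:

    from mxnet import gluon
    from mxnet import numpy as np

    @np.use_np_shape  # NumPy shape semantics active inside this block's methods
    class ZeroSizeBlock(gluon.HybridBlock):
        def hybrid_forward(self, F, x, *args, **kwargs):
            return x + F.np.zeros(shape=(0, 4))  # zero-size shape is legal here

    block = ZeroSizeBlock()
    block.initialize()
    with np.np_shape(True):  # context-manager form of the same switch
        out = block(np.zeros((0, 4)))
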