diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 02e42145fb18..874ddc97abae 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -28,7 +28,7 @@ from . import _internal as _npi from ..ndarray import NDArray -__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'invert', +__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'invert', 'delete', 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not', 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', @@ -913,6 +913,67 @@ def mod(x1, x2, out=None, **kwargs): return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out) +@set_module('mxnet.ndarray.numpy') +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : ndarray + Input array. + obj : slice, int or ndarray of ints + Indicate indices of sub-arrays to remove along the specified axis. + axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + Examples + -------- + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1., 2., 3., 4.], + [ 5., 6., 7., 8.], + [ 9., 10., 11., 12.]]) + + >>> np.delete(arr, 1, 0) + array([[ 1., 2., 3., 4.], + [ 9., 10., 11., 12.]]) + + >>> np.delete(arr, slice(None, None, 2), 1) + array([[ 2., 4.], + [ 6., 8.], + [10., 12.]]) + + >>> np.delete(arr, np.array([1,3,5]), None) + array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.]) + >>> np.delete(arr, np.array([1,1,5]), None) + array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.]) + """ + if not isinstance(arr, NDArray): + raise TypeError("'arr' can not support type {}".format(str(type(arr)))) + if isinstance(obj, slice): + start = obj.start + stop = obj.stop + step = 1 if obj.step is None else obj.step + return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis) + elif isinstance(obj, integer_types): + return _npi.delete(arr, int_ind=obj, axis=axis) + elif isinstance(obj, NDArray): + return _npi.delete(arr, obj, axis=axis) + else: + raise TypeError("'obj' can not support type {}".format(str(type(obj)))) + + @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def remainder(x1, x2, out=None): diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 4910b4d6b925..689a5b1322c6 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -47,7 +47,7 @@ from ..ndarray.ndarray import _storage_type __all__ = ['ndarray', 'empty', 'array', 'shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', - 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not', + 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not', 'delete', 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'invert', 'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 
'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'histogram', @@ -5848,6 +5848,55 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) +@set_module('mxnet.numpy') +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : ndarray + Input array. + obj : slice, int or ndarray of ints + Indicate indices of sub-arrays to remove along the specified axis. + axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + Examples + -------- + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1., 2., 3., 4.], + [ 5., 6., 7., 8.], + [ 9., 10., 11., 12.]]) + + >>> np.delete(arr, 1, 0) + array([[ 1., 2., 3., 4.], + [ 9., 10., 11., 12.]]) + + >>> np.delete(arr, slice(None, None, 2), 1) + array([[ 2., 4.], + [ 6., 8.], + [10., 12.]]) + + >>> np.delete(arr, np.array([1,3,5]), None) + array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.]) + >>> np.delete(arr, np.array([1,1,5]), None) + array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.]) + """ + return _mx_nd_np.delete(arr, obj, axis=axis) + + @set_module('mxnet.numpy') def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments """ diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index c7e9dd1398eb..43161409838a 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -125,6 +125,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'transpose', 'unique', 'unravel_index', + 'delete', 'var', 'vdot', 'vstack', diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 6efc333cc16c..bda1c52c796c 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -36,7 +36,7 @@ except ImportError: from builtins import slice as py_slice -__all__ = ['zeros', 'zeros_like', 'ones', 'ones_like', 'full_like', 'bitwise_not', 'invert', +__all__ = ['zeros', 'zeros_like', 'ones', 'ones_like', 'full_like', 'bitwise_not', 'invert', 'delete', 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p', @@ -3161,6 +3161,45 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None): return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx) +@set_module('mxnet.symbol.numpy') +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : _Symbol + Input array. + obj : slice, scaler or _Symbol of ints + Indicate indices of sub-arrays to remove along the specified axis. + axis : scaler, optional + The axis along which to delete the subarray defined by `obj`. 
+ If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : _Symbol + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + """ + if not isinstance(arr, Symbol): + raise TypeError("'arr' can not support type {}".format(str(type(arr)))) + if isinstance(obj, slice): + start = obj.start + stop = obj.stop + step = 1 if obj.step is None else obj.step + return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis) + elif isinstance(obj, integer_types): + return _npi.delete(arr, int_ind=obj, axis=axis) + elif isinstance(obj, Symbol): + return _npi.delete(arr, obj, axis=axis) + else: + raise TypeError("'obj' can not support type {}".format(str(type(obj)))) + + # pylint: disable=redefined-outer-name @set_module('mxnet.symbol.numpy') def split(ary, indices_or_sections, axis=0): diff --git a/src/operator/numpy/np_delete_op-inl.h b/src/operator/numpy/np_delete_op-inl.h new file mode 100644 index 000000000000..a144833f3294 --- /dev/null +++ b/src/operator/numpy/np_delete_op-inl.h @@ -0,0 +1,347 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * Copyright (c) 2019 by Contributors + * \file np_delete_op-inl.h + * \brief Function definition of delete operators + */ +#ifndef MXNET_OPERATOR_NUMPY_NP_DELETE_OP_INL_H_ +#define MXNET_OPERATOR_NUMPY_NP_DELETE_OP_INL_H_ + +#include +#include +#include +#include "../../common/utils.h" +#include "../tensor/sort_op.h" +#include "../tensor/init_op.h" +#include "../operator_common.h" +#include "../mxnet_op.h" +#include "../tensor/broadcast_reduce_op.h" +#ifdef __CUDACC__ +#include +#include +#include +#include +#include +#include +#endif + +namespace mxnet { +namespace op { + +struct NumpyDeleteParam : public dmlc::Parameter { + dmlc::optional start; + dmlc::optional stop; + dmlc::optional step; + dmlc::optional int_ind; + dmlc::optional axis; + DMLC_DECLARE_PARAMETER(NumpyDeleteParam) { + DMLC_DECLARE_FIELD(start) + .set_default(dmlc::optional()) + .describe("If 'obj' is slice, 'start' is one of it's arguments."); + DMLC_DECLARE_FIELD(stop) + .set_default(dmlc::optional()) + .describe("If 'obj' is slice, 'stop' is one of it's arguments."); + DMLC_DECLARE_FIELD(step) + .set_default(dmlc::optional()) + .describe("If 'obj' is slice, 'step' is one of it's arguments."); + DMLC_DECLARE_FIELD(int_ind) + .set_default(dmlc::optional()) + .describe("If 'obj' is int, 'int_ind' is the index before which" + "'values' is inserted"); + DMLC_DECLARE_FIELD(axis) + .set_default(dmlc::optional()) + .describe("Axis along which to insert `values`."); + } +}; + +namespace delete_ { + +enum DeleteOpInputs {kArr, kObj}; +enum DeleteOpOutputs {kOut}; +} // namespace delete_ + +struct SliceToIndices { + /*! + * \brief transfer slice to indices array + */ + template + MSHADOW_XINLINE static void Map(int i, IType* indices, int start, int step) { + indices[i] = start + i * step; + } +}; + +struct IsDeleteCal { + /*! + * \brief indicate which indices need to be deleted in input + * \param N used to check indices legality + * \param is_delete if is_delete[i] == False, index i needn't to be deleted from output + * if is_delete[i] == True, index i need to be deleted from output + * \param indices the indices need to be deleted + */ + template + MSHADOW_XINLINE static void Map(int i, int N, bool* is_delete, const IType* indices) { + if ((indices[i] >= 0) && (indices[i] < N)) { + is_delete[static_cast(indices[i])] = true; + } + } +}; + +struct OutPosCal { + /*! + * \brief map the index from input to output. e.g. + * \example original_position 0 1 2 3 4 + * is_delete F T T F F + * out_position 0 - - 1 2 + */ + MSHADOW_XINLINE static void Map(int i, int64_t* out_pos, const bool* is_delete) { + if (!is_delete[i]) { + int cnt = 0; + for (int j = 0; j < i; ++j) { + if (!is_delete[j]) { + cnt++; + } + } + out_pos[i] = cnt; + } + } +}; + +template +struct DeleteKernel { + /*! + * \brief delete a sub-array from input along an axis according to 'is_delete'. + * \param out_data - output: a new array with sub-arrays along an axis deleted. + * \param in_arr - input: 'arr', original array. + * \param is_delete - mark where will be deleted or be reminded in 'arr' + * \param out_pos - if is_delete[i] is 'false', out_pos[i] indicates its. + * \param arrshape - the shape of 'arr'. + * \param out_stride - the stride of 'out_data'. 
+ * \param axis - delete sub-array along this axis + */ + template + MSHADOW_XINLINE static void Map(int i, DType* out_data, + const DType* in_arr, + const bool* is_delete, + const int64_t* out_pos, + const mshadow::Shape arrshape, + const mshadow::Shape out_stride, + const int axis) { + // i -> position in in_arr's shape + mshadow::Shape arr_idx = mxnet_op::unravel(i, arrshape); + if (!is_delete[arr_idx[axis]]) { + arr_idx[axis] = out_pos[arr_idx[axis]]; + int64_t dest_idx = mxnet_op::dot(arr_idx, out_stride); + KERNEL_ASSIGN(out_data[dest_idx], req, in_arr[i]); + } + } +}; + +/*! + * /brief equals to numpy's slice.indices(range) + * /param pstart - slice.start + * /param pstep - slice.step + * /param pstop - slice.stop + * /return start - slice.indices(range).start + * /return stop - slice.indices(range).stop + * /return step - slice.indices(range).step + * /return tot - total number of slice.indices(range) + */ +inline void SliceIndices(const dmlc::optional& pstart, + const dmlc::optional& pstop, + const dmlc::optional& pstep, + const int range, + int* start, int* stop, int* step, + size_t* tot) { + *step = pstep.has_value() ? pstep.value() : 1; + CHECK_NE(*step, 0) << "'step' can not equal to 0."; + if (pstop.has_value()) { + *stop = pstop.value(); + *stop += (*stop < 0) ? range : 0; + *stop = (*stop < 0) ? ((*step < 0) ? -1 : 0) : *stop; + *stop = (*stop >= range) ? ((*step < 0) ? range - 1 : range) : *stop; + } else { + *stop = (*step > 0) ? range : -1; + } + if (pstart.has_value()) { + *start = pstart.value(); + *start += (*start < 0) ? range : 0; + *start = (*start < 0) ? ((*step < 0) ? -1 : 0) : *start; + *start = (*start >= range) ? ((*step < 0) ? range - 1 : range) : *start; + } else { + *start = (*step > 0) ? 0 : range - 1; + } + if (*step > 0 && *stop >= *start) { + *tot = static_cast((*stop - *start + *step - 1) / *step); + } else if (*step < 0 && *stop <= *start) { + *tot = static_cast((*stop - *start + *step + 1) / *step); + } +} + +template +void NumpyDeleteCompute(const nnvm::NodeAttrs& attrs, + const OpContext &ctx, + const std::vector &inputs, + const std::vector &req, + const std::vector &outputs) { + using namespace mshadow; + using namespace mxnet_op; + + const NumpyDeleteParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), + (param.step.has_value() || param.int_ind.has_value()) ? 1U : 2U); + CHECK_EQ(outputs.size(), 1U); + CHECK_EQ(req.size(), 1U); + mshadow::Stream *s = ctx.get_stream(); + + int ndim = inputs[delete_::kArr].shape().ndim(); + int axis = param.axis.has_value() ? 
param.axis.value() : -1; + NDArray arr; // original array + + if (!param.axis.has_value()) { + arr = inputs[delete_::kArr].Reshape(Shape1(inputs[delete_::kArr].shape().Size())); + ndim = 1; + axis = -1; + } else { + arr = inputs[delete_::kArr]; + } + + if (ndim == 0) { + const_cast(outputs[delete_::kOut]).Init(arr.shape()); + mxnet_op::copy(s, outputs[delete_::kOut].data(), inputs[delete_::kArr].data()); + return; + } + + axis = CheckAxis(axis, ndim); + int N = (arr.shape())[axis]; + mxnet::TShape outshape(arr.shape()); + // if obj is slice, they're obj's arguments + int start = 0, stop = 0, step = 0; + // total number to be deleted + size_t numtodel = 0; + // if obj is scaler, index is it's value + int index = 0; + + if (param.step.has_value()) { // obj is slice + SliceIndices(param.start, param.stop, param.step, + N, &start, &stop, &step, &numtodel); + if (numtodel == 0) { + const_cast(outputs[delete_::kOut]).Init(arr.shape()); + mxnet_op::copy(s, outputs[delete_::kOut].data(), inputs[delete_::kArr].data()); + return; + } + outshape[axis] -= numtodel; + const_cast(outputs[delete_::kOut]).Init(outshape); + } else if (param.int_ind.has_value()) { // obj is scaler + index = param.int_ind.value(); + CHECK((index >= -1 * N) && (index < N)) + << "index " << index + << " is out of bounds for axis " << axis + << " with size " << N << "\n"; + index += ((index < 0) ? N : 0); + numtodel = static_cast(1); + outshape[axis] -= 1; + const_cast(outputs[delete_::kOut]).Init(outshape); + } else { // obj is tensor + numtodel = inputs[delete_::kObj].shape().Size(); + } + + char* out_pos_ptr = NULL; + char* indices_ptr = NULL; + char* is_delete_ptr = NULL; + MSHADOW_TYPE_SWITCH(((inputs.size() == 2U) ? // obj is tensor + inputs[delete_::kObj].dtype() : + mshadow::DataType::kFlag), IType, { + size_t temp_mem_size = sizeof(int64_t) * arr.shape()[axis] + + sizeof(IType) * numtodel + + sizeof(bool) * arr.shape()[axis]; + Tensor temp_mem = + ctx.requested[0].get_space_typed(Shape1(temp_mem_size), s); + out_pos_ptr = temp_mem.dptr_; + indices_ptr = out_pos_ptr + sizeof(int64_t) * arr.shape()[axis]; + is_delete_ptr = indices_ptr + sizeof(IType) * numtodel; + if (param.step.has_value()) { // obj is slice, transfer slice to tensor + Kernel::Launch( + s, numtodel, reinterpret_cast(indices_ptr), start, step); + } else if (param.int_ind.has_value()) { // obj is scaler, copy it to tensor + Fill(s, TBlob(reinterpret_cast(indices_ptr), + Shape1(numtodel), xpu::kDevMask), kWriteTo, index); + } else { // obj is tensor, copy it to a unified tensor + mxnet_op::copy(s, + TBlob(reinterpret_cast(indices_ptr), inputs[delete_::kObj].shape(), + inputs[delete_::kObj].data().dev_mask()), + inputs[delete_::kObj].data()); + } + mxnet_op::Kernel::Launch( + s, arr.shape()[axis], reinterpret_cast(is_delete_ptr)); + // mark which position need to be deleted from input arr + Kernel::Launch( + s, numtodel, N, reinterpret_cast(is_delete_ptr), + reinterpret_cast(indices_ptr)); + // calculate output data's original position in input arr + Kernel::Launch( + s, arr.shape()[axis], reinterpret_cast(out_pos_ptr), + reinterpret_cast(is_delete_ptr)); + }); + + if (inputs.size() == 2U) { // obj is tensor + // get total number of nonredundant indices + #ifdef __CUDACC__ + thrust::device_ptris_delete_dev(reinterpret_cast(is_delete_ptr)); + thrust::device_vectorvec_is_delete(is_delete_dev, is_delete_dev + arr.shape()[axis]); + #else + std::vectorvec_is_delete(reinterpret_cast(is_delete_ptr), + reinterpret_cast(is_delete_ptr) + arr.shape()[axis]); + #endif 
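+    // Entries of 'obj' may be duplicated or out of range; such entries are
+    // collapsed/ignored by the 'is_delete' mask built above, so recount the
+    // positions actually marked for deletion before shrinking the output
+    // shape along 'axis'.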
+ numtodel = 0; + for (int i = 0; i < arr.shape()[axis]; ++i) { + if (vec_is_delete[i]) { + numtodel++; + } + } + outshape[axis] -= numtodel; + const_cast(outputs[delete_::kOut]).Init(outshape); + } + + MSHADOW_TYPE_SWITCH(((inputs.size() == 2U) ? // obj is tensor + inputs[delete_::kObj].dtype() : + mshadow::DataType::kFlag), IType, { + MXNET_NDIM_SWITCH(outshape.ndim(), ndim, { + mshadow::Shape out_strides = mxnet_op::calc_stride(outshape.get()); + MSHADOW_TYPE_SWITCH(outputs[delete_::kOut].dtype(), DType, { + MXNET_ASSIGN_REQ_SWITCH(req[delete_::kOut], req_type, { + Kernel, xpu>::Launch( + s, arr.shape().Size(), + outputs[delete_::kOut].data().dptr(), + arr.data().dptr(), + reinterpret_cast(is_delete_ptr), + reinterpret_cast(out_pos_ptr), + arr.shape().get(), + out_strides, axis); + }); + }); + }); + }); +} + +} // namespace op +} // namespace mxnet + +#endif // MXNET_OPERATOR_NUMPY_NP_DELETE_OP_INL_H_ diff --git a/src/operator/numpy/np_delete_op.cc b/src/operator/numpy/np_delete_op.cc new file mode 100644 index 000000000000..48840bf9d230 --- /dev/null +++ b/src/operator/numpy/np_delete_op.cc @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2019 by Contributors + * \file np_delete_op.cc + * \brief CPU Implementation of numpy insert operations + */ + +#include +#include "./np_delete_op-inl.h" + +namespace mxnet { +namespace op { + +DMLC_REGISTER_PARAMETER(NumpyDeleteParam); + +bool NumpyDeleteType(const nnvm::NodeAttrs& attrs, + std::vector *in_type, + std::vector *out_type) { + const NumpyDeleteParam& param = nnvm::get(attrs.parsed); + int insize = (param.step.has_value() || param.int_ind.has_value()) ? 1 : 2; + CHECK_EQ(in_type->size(), insize); + CHECK_EQ(out_type->size(), 1U); + if (insize == 3) { + CHECK_NE((*in_type)[1], -1) << "Index type must be set for insert operator\n"; + CHECK(((*in_type)[1] == mshadow::DataType::kFlag) || + ((*in_type)[1] == mshadow::DataType::kFlag)) + << "Index type only support int32 or int64.\n"; + } + TYPE_ASSIGN_CHECK(*out_type, 0, (*in_type)[0]); + TYPE_ASSIGN_CHECK(*in_type, 0, (*out_type)[0]); + return (*in_type)[0] != -1; +} + +inline bool NumpyDeleteStorageType(const nnvm::NodeAttrs& attrs, + const int dev_mask, + DispatchMode* dispatch_mode, + std::vector *in_attrs, + std::vector *out_attrs) { + const NumpyDeleteParam& param = nnvm::get(attrs.parsed); + unsigned int insize = (param.step.has_value() || param.int_ind.has_value()) ? 
1U : 2U;
+  CHECK_EQ(in_attrs->size(), insize);
+  CHECK_EQ(out_attrs->size(), 1U);
+  for (int &attr : *in_attrs) {
+    CHECK_EQ(attr, kDefaultStorage) << "Only default storage is supported";
+  }
+  for (int &attr : *out_attrs) {
+    attr = kDefaultStorage;
+  }
+  *dispatch_mode = DispatchMode::kFComputeEx;
+  return true;
+}
+
+NNVM_REGISTER_OP(_npi_delete)
+.describe(R"code(Delete sub-arrays along the given axis at the given indices.)code" ADD_FILELINE)
+.set_attr_parser(ParamParser<NumpyDeleteParam>)
+.set_num_inputs([](const NodeAttrs& attrs) {
+  const NumpyDeleteParam& params = nnvm::get<NumpyDeleteParam>(attrs.parsed);
+  return (params.step.has_value() || params.int_ind.has_value()) ? 1U : 2U;
+})
+.set_num_outputs(1)
+.set_attr<nnvm::FListInputNames>("FListInputNames",
+  [](const NodeAttrs& attrs) {
+    const NumpyDeleteParam& params = nnvm::get<NumpyDeleteParam>(attrs.parsed);
+    return (params.step.has_value() || params.int_ind.has_value()) ?
+           std::vector<std::string>{"arr"} :
+           std::vector<std::string>{"arr", "obj"};
+})
+.set_attr<nnvm::FInferType>("FInferType", NumpyDeleteType)
+.set_attr<FComputeEx>("FComputeEx", NumpyDeleteCompute<cpu>)
+.set_attr<FInferStorageType>("FInferStorageType", NumpyDeleteStorageType)
+.set_attr<FResourceRequest>("FResourceRequest",
+  [](const NodeAttrs& attrs) {
+    return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
+  })
+.add_argument("arr", "NDArray-or-Symbol", "Input ndarray")
+.add_argument("obj", "NDArray-or-Symbol", "Input ndarray")
+.add_arguments(NumpyDeleteParam::__FIELDS__());
+
+}  // namespace op
+}  // namespace mxnet
diff --git a/src/operator/numpy/np_delete_op.cu b/src/operator/numpy/np_delete_op.cu
new file mode 100644
index 000000000000..599d01788138
--- /dev/null
+++ b/src/operator/numpy/np_delete_op.cu
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors + * \file np_delete_op.cu + * \brief GPU Implementation of numpy delete operations + */ + +#include "./np_delete_op-inl.h" + +namespace mxnet { +namespace op { + +NNVM_REGISTER_OP(_npi_delete) +.set_attr("FComputeEx", NumpyDeleteCompute); + +} +} diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index fcdf547bfbec..0cc4f7cac96d 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -855,6 +855,27 @@ def _add_workload_unique(): # OpArgMngr.add_workload('unique', np.arange(10, dtype=np.uint8).reshape(-1, 2).astype(bool), axis=1) +def _add_workload_delete(): + a = np.arange(5) + nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + lims = [-6, -2, 0, 1, 2, 4, 5] + steps = [-3, -1, 1, 3] + for start in lims: + for stop in lims: + for step in steps: + s = slice(start, stop, step) + OpArgMngr.add_workload('delete', a, s) + OpArgMngr.add_workload('delete', nd_a, s, axis=1) + OpArgMngr.add_workload('delete', a, np.array([]), axis=0) + OpArgMngr.add_workload('delete', a, 0) + OpArgMngr.add_workload('delete', a, np.array([])) + OpArgMngr.add_workload('delete', a, np.array([0, 1])) + OpArgMngr.add_workload('delete', a, slice(1, 2)) + OpArgMngr.add_workload('delete', a, slice(1, -2)) + k = np.arange(10).reshape(2, 5) + OpArgMngr.add_workload('delete', k, slice(60, None), axis=1) + + def _add_workload_var(array_pool): OpArgMngr.add_workload('var', array_pool['4x1']) OpArgMngr.add_workload('var', np.array([np.float16(1.)])) @@ -1482,6 +1503,7 @@ def _prepare_workloads(): _add_workload_tile() _add_workload_transpose() _add_workload_unique() + _add_workload_delete() _add_workload_var(array_pool) _add_workload_zeros_like(array_pool) _add_workload_linalg_norm() diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index af9228d45991..99d104df1198 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -2754,6 +2754,81 @@ def hybrid_forward(self, F, x): assert same(ret_mx.asnumpy(), ret_np) +@with_seed() +@use_np +def test_np_delete(): + class TestDelete(HybridBlock): + def __init__(self, obj, axis=None): + super(TestDelete, self).__init__() + self._obj = obj + self._axis = axis + + def hybrid_forward(self, F, a): + return F.np.delete(a, self._obj, axis=self._axis) + + def GetSize(shp): + if len(shp) == 0: + return 0 + else: + res = 1 + shp_list = list(shp) + for x in shp: + res *= x + return res + + def GetDimSize(shp, axis): + if axis is None: + return GetSize(shp) + shp_list = list(shp) + return shp_list[axis] + + shape = [(), (0, ), (1, ), (2, 3), (2, 1, 4, 5)] + config = [] + for shp in shape: + for ax in range(-1 * len(shp), len(shp), 2): + #test slice + for st in [-5, -2, 0, 2, 5, None]: + for ed in [-5, -2, 0, 2, 5, None]: + for stp in [-5, -2, 2, 5, None]: + config.append(tuple([shp, slice(st, ed, stp), None])) + config.append(tuple([shp, slice(st, ed, stp), ax])) + #test iteger + for idx in range(-1 * GetDimSize(shp, ax), GetDimSize(shp, ax)): + config.append(tuple([shp, idx, ax])) + #test ndarray indices + idx = _np.random.randint(-1 * shp[ax], shp[ax] + 1, size = (4)).tolist() + config.append(tuple([shp, idx, ax])) + + for arr_shape, obj, axis in config: + for objtype in ['int32', 'int64']: + if type(obj) == list: + obj_mxnp = np.array(obj, dtype=objtype) + obj_onp = _np.array(obj, dtype=objtype) + elif type(obj) == slice: + obj_mxnp = obj 
+ obj_onp = obj + else: + obj_mxnp = (_np.int32(obj) if objtype == 'int32' else _np.int64(obj)) + obj_onp = (_np.int32(obj) if objtype == 'int32' else _np.int64(obj)) + test_delete = TestDelete(obj=obj_mxnp, axis=axis) + + a = mx.nd.random.uniform(-1.0, 1.0, shape=arr_shape).as_np_ndarray() + a.attach_grad() + expected_ret = _np.delete(a.asnumpy(), obj_onp, axis=axis) + + with mx.autograd.record(): + y = test_delete(a) + + assert y.shape == expected_ret.shape + assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3, atol=1e-5) + + #test imperative + mx_out = np.delete(a, obj_mxnp, axis=axis) + np_out = _np.delete(a.asnumpy(), obj_onp, axis=axis) + + assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) + + @with_seed() @use_np def test_np_argmin_argmax():
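For quick reference, a minimal usage sketch of the new front end (this assumes an MXNet build that includes the operator registered above; the intent is to mirror official numpy.delete semantics):

    from mxnet import np, npx
    npx.set_np()

    a = np.arange(12).reshape(3, 4)
    np.delete(a, 1, axis=0)                 # drop the second row
    np.delete(a, slice(None, None, 2), 1)   # drop every other column
    np.delete(a, np.array([0, 0, 2]), 1)    # duplicate indices are deleted only once
    np.delete(a, -1, axis=1)                # a negative integer counts from the end

As in the docstring examples, passing axis=None flattens the input before deletion; slice, integer, and ndarray objects are dispatched to _npi.delete through the start/stop/step, int_ind, and tensor arguments respectively.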