[Numpy] Add op fmax, fmin
* Fix sanity

* Fix bug in the GPU part; add scalar compute

* Finish CPU and GPU tests of fmax, fmin
hanke580 committed Feb 18, 2020
1 parent 4559ab8 · commit 7168986
Showing 11 changed files with 257 additions and 4 deletions.
2 changes: 1 addition & 1 deletion 3rdparty/dlpack
40 changes: 39 additions & 1 deletion python/mxnet/ndarray/numpy/_op.py
@@ -36,7 +36,7 @@
'tensordot', 'eye', 'linspace',
'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'minimum',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',
'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',
@@ -4227,6 +4227,25 @@ def maximum(x1, x2, out=None, **kwargs):
    return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
    """
    Returns the element-wise maximum of the input arrays with broadcasting, ignoring NaNs.
    If one of the elements being compared is NaN, the non-NaN element is returned.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise, ignoring NaNs. This is a scalar if
        both x1 and x2 are scalars.
    """
    return _ufunc_helper(x1, x2, _npi.fmax, _np.fmax, _npi.fmax_scalar, None, out)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
@@ -4246,6 +4265,25 @@ def minimum(x1, x2, out=None, **kwargs):
    return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
    """
    Returns the element-wise minimum of the input arrays with broadcasting, ignoring NaNs.
    If one of the elements being compared is NaN, the non-NaN element is returned.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise, ignoring NaNs. This is a scalar if
        both x1 and x2 are scalars.
    """
    return _ufunc_helper(x1, x2, _npi.fmin, _np.fmin, _npi.fmin_scalar, None, out)


@set_module('mxnet.ndarray.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
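Both fmax and fmin funnel through the same _ufunc_helper dispatch as maximum and minimum. Below is a minimal sketch of that dispatch under the argument order used above; the helper name, the numeric_types tuple, and the swap-for-commutativity detail are written from context, so treat it as illustrative rather than the exact library internals.

def _ufunc_helper_sketch(x1, x2, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
    # Scalars are plain Python numbers; ndarrays go to the MXNet kernels.
    numeric_types = (int, float, bool)
    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
        return fn_scalar(x1, x2)                   # both scalars: fall back to _np.fmax
    if isinstance(x2, numeric_types):
        return lfn_scalar(x1, float(x2), out=out)  # ndarray + scalar: _npi.fmax_scalar
    if isinstance(x1, numeric_types):
        # rfn_scalar is None for commutative ops such as fmax: swap and reuse the left kernel.
        return lfn_scalar(x2, float(x1), out=out)
    return fn_array(x1, x2, out=out)               # two ndarrays: broadcast kernel _npi.fmax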
61 changes: 60 additions & 1 deletion python/mxnet/numpy/multiarray.py
@@ -57,7 +57,8 @@
'sort', 'tensordot', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange',
'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
'concatenate', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'insert',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'insert',
'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman',
'flip', 'flipud', 'fliplr', 'around', 'round', 'arctan2', 'hypot',
'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad',
@@ -5930,6 +5931,35 @@ def maximum(x1, x2, out=None, **kwargs):
    return _mx_nd_np.maximum(x1, x2, out=out)


@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
    """
    Returns the element-wise maximum of the input arrays with broadcasting, ignoring NaNs.
    If one of the elements being compared is NaN, the non-NaN element is returned.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise, ignoring NaNs. This is a scalar if
        both x1 and x2 are scalars.

    Examples
    --------
    >>> np.fmax(np.array([2, 3, 4]), np.array([1, 5, 2]))
    array([2., 5., 4.])

    >>> np.fmax(np.eye(2), np.array([0.5, 2]))  # broadcasting
    array([[1. , 2. ],
           [0.5, 2. ]])
    """
    return _mx_nd_np.fmax(x1, x2, out=out)


@set_module('mxnet.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
@@ -5959,6 +5989,35 @@ def minimum(x1, x2, out=None, **kwargs):
    return _mx_nd_np.minimum(x1, x2, out=out)


@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
    """
    Returns the element-wise minimum of the input arrays with broadcasting, ignoring NaNs.
    If one of the elements being compared is NaN, the non-NaN element is returned.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared. They must have the same shape,
        or shapes that can be broadcast to a single shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise, ignoring NaNs. This is a scalar if
        both x1 and x2 are scalars.

    Examples
    --------
    >>> np.fmin(np.array([2, 3, 4]), np.array([1, 5, 2]))
    array([1., 3., 2.])

    >>> np.fmin(np.eye(2), np.array([0.5, 2]))  # broadcasting
    array([[0.5, 0. ],
           [0. , 1. ]])
    """
    return _mx_nd_np.fmin(x1, x2, out=out)


@set_module('mxnet.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
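A usage note the docstrings above only imply: fmax and fmin differ from maximum and minimum exactly on NaN inputs, where the non-NaN operand wins and only an all-NaN pair stays NaN. A hedged interactive sketch, assuming the NaN-ignoring kernels defined in mshadow_op.h below:

>>> np.fmax(np.array([1., float('nan')]), np.array([float('nan'), float('nan')]))
array([ 1., nan])
>>> np.fmin(np.array([2., float('nan'), 5.]), np.array([1., 3., float('nan')]))
array([1., 3., 5.])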
2 changes: 2 additions & 0 deletions python/mxnet/numpy_dispatch_protocol.py
@@ -270,7 +270,9 @@ def _register_array_function():
    'arccosh',
    'arctanh',
    'maximum',
    'fmax',
    'minimum',
    'fmin',
    'ceil',
    'trunc',
    'floor',
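Adding 'fmax' and 'fmin' to this list registers them for NumPy's __array_function__ protocol, so the official NumPy entry points can dispatch to MXNet ndarrays. A hedged sketch of what that enables, assuming an MXNet build with the numpy-compatible mode switched on:

import numpy as onp           # the official NumPy
from mxnet import np, npx     # MXNet's NumPy-compatible front end
npx.set_np()

x = np.array([1., float('nan'), 3.])
y = np.array([2., 2., float('nan')])
z = onp.fmax(x, y)            # routed to mxnet.numpy.fmax via __array_function__
print(type(z))                # expected: an mxnet.numpy.ndarray, not a NumPy array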
14 changes: 13 additions & 1 deletion python/mxnet/symbol/numpy/_symbol.py
@@ -43,7 +43,7 @@
'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort', 'tensordot', 'eye', 'linspace',
'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'minimum',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',
'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',
@@ -4024,12 +4024,24 @@ def maximum(x1, x2, out=None, **kwargs):
    return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
    return _ufunc_helper(x1, x2, _npi.fmax, _np.fmax, _npi.fmax_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
    return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)


@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
    return _ufunc_helper(x1, x2, _npi.fmin, _np.fmin, _npi.fmin_scalar, None, out)


@set_module('mxnet.symbol.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
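The symbol-side wrappers are deliberately docstring-free one-liners; they exist so fmax and fmin survive hybridization. A hedged Gluon sketch of that path (the F.np spelling and the expected output assume the 1.x numpy mode; treat as illustrative):

from mxnet import np, npx
from mxnet.gluon import HybridBlock
npx.set_np()

class ClampBelowZero(HybridBlock):
    def hybrid_forward(self, F, x):
        # The scalar operand takes the _npi.fmax_scalar path registered below.
        return F.np.fmax(x, 0.0)

block = ClampBelowZero()
block.hybridize()  # F becomes the symbolic namespace, reaching _symbol.py's fmax
print(block(np.array([-1., float('nan'), 2.])))  # expected: [0., 0., 2.]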
28 changes: 28 additions & 0 deletions src/operator/mshadow_op.h
@@ -1131,6 +1131,20 @@ struct maximum : public mxnet_op::tunable {
  }
};

/*! \brief used for computing binary operator fmax, which ignores NaNs */
struct fmax : public mxnet_op::tunable {
  template<typename DType>
  MSHADOW_XINLINE static DType Map(DType a, DType b) {
    // If exactly one operand is NaN, return the other; if both are NaN,
    // the first branch returns a (itself NaN), matching np.fmax.
    if (IsNan(b)) {
      return a;
    } else if (IsNan(a)) {
      return b;
    } else {
      return (a > b ? a : b);
    }
  }
};

/*! \brief used for computing binary operator minimum */
struct minimum : public mxnet_op::tunable {
  template<typename DType>
@@ -1143,6 +1157,20 @@ struct minimum : public mxnet_op::tunable {
  }
};

/*! \brief used for computing binary operator fmin, which ignores NaNs */
struct fmin : public mxnet_op::tunable {
  template<typename DType>
  MSHADOW_XINLINE static DType Map(DType a, DType b) {
    // Same structure as fmax with the comparison flipped: a lone NaN loses.
    if (IsNan(b)) {
      return a;
    } else if (IsNan(a)) {
      return b;
    } else {
      return (a < b ? a : b);
    }
  }
};

/*! \brief boolean any/all kernel that determines whether elem is NonZero */
struct NonZero {
  template<typename DType>
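A minimal Python mirror of the two Map functions above, to pin down the NaN handling (a sketch of the semantics only, not the shipped kernel):

import math

def fmax_map(a, b):
    # Mirrors mshadow_op::fmax::Map: a NaN operand loses to a non-NaN one;
    # if both are NaN, the first branch returns a, which is itself NaN.
    if math.isnan(b):
        return a
    if math.isnan(a):
        return b
    return a if a > b else b

def fmin_map(a, b):
    # Identical structure with the comparison flipped.
    if math.isnan(b):
        return a
    if math.isnan(a):
        return b
    return a if a < b else b

assert fmax_map(1.0, float('nan')) == 1.0
assert math.isnan(fmin_map(float('nan'), float('nan')))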
66 changes: 66 additions & 0 deletions src/operator/numpy/np_elemwise_broadcast_op_extended.cc
@@ -371,5 +371,71 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_rldexp_scalar)
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::rldexp_grad>);

MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(broadcast_fmax)
.add_alias("_npi_fmax")
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmax>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_broadcast_fmax"});

NNVM_REGISTER_OP(_backward_broadcast_fmax)
.add_alias("_backward_npi_fmax")
.set_num_inputs(3)
.set_num_outputs(2)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
  [](const NodeAttrs& attrs){
    return std::vector<std::pair<int, int> >{{0, 1}};
  })
.set_attr<FResourceRequest>("FResourceRequest",
  [](const NodeAttrs& attrs) {
    return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
  })
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::ge,
                                                                  mshadow_op::lt>);

MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_fmax_scalar)
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmax>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_fmax_scalar"})
.add_alias("_FmaxScalar")
.add_alias("_npi_fmax_scalar");

MXNET_OPERATOR_REGISTER_BINARY(_backward_fmax_scalar)
.add_alias("_backward_npi_fmax_scalar")
.add_argument("scalar", "float", "scalar value")
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::ge>);

MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(broadcast_fmin)
.add_alias("_npi_fmin")
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmin>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_broadcast_fmin"});

NNVM_REGISTER_OP(_backward_broadcast_fmin)
.add_alias("_backward_npi_fmin")
.set_num_inputs(3)
.set_num_outputs(2)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
  [](const NodeAttrs& attrs){
    return std::vector<std::pair<int, int> >{{0, 1}};
  })
.set_attr<FResourceRequest>("FResourceRequest",
  [](const NodeAttrs& attrs) {
    return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
  })
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::le,
                                                                  mshadow_op::gt>);

MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_fmin_scalar)
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmin>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_fmin_scalar"})
.add_alias("_FminScalar")
.add_alias("_npi_fmin_scalar");

MXNET_OPERATOR_REGISTER_BINARY(_backward_fmin_scalar)
.add_alias("_backward_npi_fmin_scalar")
.add_argument("scalar", "float", "scalar value")
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::le>);

} // namespace op
} // namespace mxnet
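Note that the backward passes reuse the plain comparison functors (ge/lt for fmax, le/gt for fmin) rather than NaN-aware ones: the incoming gradient goes to x1 where x1 >= x2 and to x2 where x1 < x2, so a NaN pair routes gradient to neither input. A hedged NumPy sketch of that mask-based routing, read off BinaryBroadcastBackwardUseIn's template arguments rather than the kernel itself:

import numpy as _np

def fmax_backward_sketch(grad_out, x1, x2):
    grad_x1 = grad_out * (x1 >= x2)   # mshadow_op::ge mask; ties credit x1
    grad_x2 = grad_out * (x1 < x2)    # mshadow_op::lt mask
    return grad_x1, grad_x2

g1, g2 = fmax_backward_sketch(_np.ones(3), _np.array([2., 1., 3.]), _np.array([1., 5., 3.]))
# g1 == [1., 0., 1.] and g2 == [0., 1., 0.]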
26 changes: 26 additions & 0 deletions src/operator/numpy/np_elemwise_broadcast_op_extended.cu
@@ -116,5 +116,31 @@ NNVM_REGISTER_OP(_backward_npi_ldexp_scalar)
NNVM_REGISTER_OP(_backward_npi_rldexp_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::rldexp_grad>);

NNVM_REGISTER_OP(_npi_fmax)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::fmax>);

NNVM_REGISTER_OP(_backward_npi_fmax)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastBackwardUseIn<gpu, mshadow_op::ge,
mshadow_op::lt>);

NNVM_REGISTER_OP(_npi_fmax_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::fmax>);

NNVM_REGISTER_OP(_backward_npi_fmax_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::ge>);

NNVM_REGISTER_OP(_npi_fmin)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::fmin>);

NNVM_REGISTER_OP(_backward_npi_fmin)
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastBackwardUseIn<gpu, mshadow_op::le,
mshadow_op::gt>);

NNVM_REGISTER_OP(_npi_fmin_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::fmin>);

NNVM_REGISTER_OP(_backward_npi_fmin_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::le>);

} // namespace op
} // namespace mxnet
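The GPU registrations bind the same mshadow_op functors under FCompute<gpu>, so CPU and GPU results should agree elementwise. A hedged parity check in the spirit of the commit's "Finish cpu,gpu test" note; the ctx argument and copyto transfers follow the classic 1.x API, and a GPU build is assumed:

import mxnet as mx
import numpy as onp
from mxnet import np, npx
npx.set_np()

x = np.array([1., float('nan'), 3.], ctx=mx.cpu())
y = np.array([float('nan'), 2., 1.], ctx=mx.cpu())
cpu_out = np.fmax(x, y)  # [1., 2., 3.]; no NaNs survive in this case
if mx.context.num_gpus() > 0:
    gpu_out = np.fmax(x.copyto(mx.gpu(0)), y.copyto(mx.gpu(0)))
    onp.testing.assert_allclose(cpu_out.asnumpy(), gpu_out.asnumpy())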
2 changes: 2 additions & 0 deletions src/operator/operator_tune.cc
@@ -375,7 +375,9 @@ IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::gelu_grad);  // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::prelu_grad); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::elu_grad); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::maximum); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::fmax); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::minimum); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::fmin); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::hypot); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::hypot_grad_left); // NOLINT()
IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::hypot_grad_left); // NOLINT()
16 changes: 16 additions & 0 deletions tests/python/unittest/test_numpy_interoperability.py
@@ -1454,13 +1454,27 @@ def _add_workload_maximum(array_pool):
    OpArgMngr.add_workload('maximum', array_pool['4x1'], array_pool['1x1x0'])


def _add_workload_fmax(array_pool):
    OpArgMngr.add_workload('fmax', array_pool['4x1'], array_pool['1x2'])
    OpArgMngr.add_workload('fmax', array_pool['4x1'], 2)
    OpArgMngr.add_workload('fmax', 2, array_pool['4x1'])
    OpArgMngr.add_workload('fmax', array_pool['4x1'], array_pool['1x1x0'])


def _add_workload_minimum(array_pool):
    OpArgMngr.add_workload('minimum', array_pool['4x1'], array_pool['1x2'])
    OpArgMngr.add_workload('minimum', array_pool['4x1'], 2)
    OpArgMngr.add_workload('minimum', 2, array_pool['4x1'])
    OpArgMngr.add_workload('minimum', array_pool['4x1'], array_pool['1x1x0'])


def _add_workload_fmin(array_pool):
    OpArgMngr.add_workload('fmin', array_pool['4x1'], array_pool['1x2'])
    OpArgMngr.add_workload('fmin', array_pool['4x1'], 2)
    OpArgMngr.add_workload('fmin', 2, array_pool['4x1'])
    OpArgMngr.add_workload('fmin', array_pool['4x1'], array_pool['1x1x0'])


def _add_workload_negative(array_pool):
    OpArgMngr.add_workload('negative', array_pool['4x1'])

@@ -2076,7 +2090,9 @@ def _prepare_workloads():
    _add_workload_mod(array_pool)
    _add_workload_remainder()
    _add_workload_maximum(array_pool)
    _add_workload_fmax(array_pool)
    _add_workload_minimum(array_pool)
    _add_workload_fmin(array_pool)
    _add_workload_negative(array_pool)
    _add_workload_absolute(array_pool)
    _add_workload_sign(array_pool)
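These workloads mirror the maximum/minimum ones: both scalar operand orders plus two broadcast pairings, one of which produces an empty result. A quick plain-NumPy sketch of the shape arithmetic, with the array_pool keys read as shapes:

import numpy as _np

a = _np.ones((4, 1))     # array_pool['4x1']
b = _np.ones((1, 2))     # array_pool['1x2']
c = _np.ones((1, 1, 0))  # array_pool['1x1x0']

print(_np.fmax(a, b).shape)  # (4, 2): ordinary broadcasting
print(_np.fmax(a, c).shape)  # (1, 4, 0): broadcasting against an empty axis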
4 changes: 4 additions & 0 deletions tests/python/unittest/test_numpy_op.py
@@ -2268,8 +2268,12 @@ def hybrid_forward(self, F, a, b, *args, **kwargs):
'bitwise_or': (-100, 100, [None], None, [[_np.int32]]),
'maximum': (-1, 1, [lambda y, x1, x2: _np.ones(y.shape) * (x1 >= x2)],
            [lambda y, x1, x2: _np.ones(y.shape) * (x1 < x2)]),
'fmax': (-1, 1, [lambda y, x1, x2: _np.ones(y.shape) * (x1 >= x2)],
         [lambda y, x1, x2: _np.ones(y.shape) * (x1 < x2)]),
'minimum': (-1, 1, [lambda y, x1, x2: _np.ones(y.shape) * (x1 <= x2)],
            [lambda y, x1, x2: _np.ones(y.shape) * (x1 > x2)]),
'fmin': (-1, 1, [lambda y, x1, x2: _np.ones(y.shape) * (x1 <= x2)],
         [lambda y, x1, x2: _np.ones(y.shape) * (x1 > x2)]),
'copysign': (-1, 1,
             [lambda y, x1, x2: _np.ones(y.shape) * (((x1 * x2) >= 0).astype(_np.float32) - ((x1 * x2) < 0).astype(_np.float32))],
             [lambda y, x1, x2: _np.zeros(y.shape)]),
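Each dictionary entry pairs the sampling range (-1, 1) with expected-gradient lambdas: the first list is dy/dx1, the second dy/dx2, both shaped like the forward output. A worked plain-NumPy example for the fmax entry:

import numpy as _np

x1 = _np.array([0.3, -0.7, 0.5])
x2 = _np.array([-0.2, 0.9, 0.5])
y = _np.fmax(x1, x2)                    # [0.3, 0.9, 0.5]
dydx1 = _np.ones(y.shape) * (x1 >= x2)  # [1., 0., 1.]  (ties credited to x1)
dydx2 = _np.ones(y.shape) * (x1 < x2)   # [0., 1., 0.]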
