diff --git a/src/operator/tensor/la_op-inl.h b/src/operator/tensor/la_op-inl.h
index c795c2a3f375..de27187bca9a 100644
--- a/src/operator/tensor/la_op-inl.h
+++ b/src/operator/tensor/la_op-inl.h
@@ -510,25 +510,6 @@ struct det {
   }
 };
 
-// logdet = log(det(A))
-struct logdet {
-  template<typename xpu, typename DType>
-  static void op(const Tensor<xpu, 3, DType>& A, const Tensor<xpu, 1, DType>& logdet,
-                 const Tensor<xpu, 3, DType>& LU, const Tensor<xpu, 2, int>& pivot,
-                 const OpContext& ctx, const nnvm::NodeAttrs& attrs) {
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    Tensor<xpu, 1, DType> sign = ctx.requested[0]
-      .get_space_typed<xpu, 1, DType>(logdet.shape_, s);
-    Copy(LU, A, s);
-    linalg_batch_getrf(LU, pivot, false, s);
-    using namespace mxnet_op;
-    using namespace mshadow::expr;
-    Kernel<SignedLogDet, xpu>::Launch(s, pivot.size(0), pivot.size(1), pivot.dptr_,
-                                      LU.dptr_, sign.dptr_, logdet.dptr_);
-    const_cast<Tensor<xpu, 1, DType>&>(logdet) = F<mshadow_op::log>(sign) + logdet;
-  }
-};
-
 // sign = sign(det(A))
 // logabsdet = log(abs(det(A)))
 struct slogdet {
@@ -941,33 +941,6 @@ struct det_backward {
   }
 };
 
-// Backward of logdet(A) is derived from Jacobi's formula.
-// The closed form solution is pretty easy when A is invertible.
-// For non-invertible A, grad is not backwarded now.
-// TODO(arcadiaphy) add implementation for non-invertible case
-struct logdet_backward {
-  template<typename xpu, typename DType>
-  static void op(const Tensor<xpu, 1, DType>& dlogdet,
-                 const Tensor<xpu, 1, DType>& logdet,
-                 const Tensor<xpu, 3, DType>& LU,
-                 const Tensor<xpu, 2, int>& pivot,
-                 const Tensor<xpu, 3, DType>& dA,
-                 const OpContext& ctx, const nnvm::NodeAttrs& attrs) {
-    using namespace mshadow;
-    using namespace mshadow::expr;
-    using namespace mxnet_op;
-    // compute inverse(A) and stores it to LU
-    linalg_batch_det_backward_helper(LU, pivot, logdet, dA, DType(-INFINITY), ctx);
-    const_cast<Tensor<xpu, 3, DType>&>(dA) = broadcast_to(reshape(dlogdet, \
-      Shape3(logdet.size(0), 1, 1)), mxnet::TShape(LU.shape_)) * \
-      transpose(LU, Shape3(0, 2, 1));
-    Stream<xpu> *s = ctx.get_stream<xpu>();
-    // stop grad for zero det temporarily
-    Kernel<StopZeroDetGrad, xpu>::Launch(s, dA.shape_.Size(), dA.size(1) * dA.size(2), \
-                                         dA.dptr_, logdet.dptr_, DType(-INFINITY));
-  }
-};
-
 // Backward of slogdet(A) is derived from Jacobi's formula.
 // The closed form solution is pretty easy when A is invertible.
 // For non-invertible A, grad is not backwarded now.
diff --git a/src/operator/tensor/la_op.cc b/src/operator/tensor/la_op.cc
index 2c322846c42e..dade5692800d 100644
--- a/src/operator/tensor/la_op.cc
+++ b/src/operator/tensor/la_op.cc
@@ -986,54 +986,6 @@ NNVM_REGISTER_OP(_backward_linalg_det)
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
 .set_attr<FCompute>("FCompute", LaOpDetBackward<cpu, 1, det_backward>);
 
-NNVM_REGISTER_OP(_linalg_logdet)
-.add_alias("linalg_logdet")
-.describe(R"code(Compute the log determinant of a matrix.
-Input is a tensor *A* of dimension *n >= 2*.
-
-If *n=2*, *A* is a square matrix. We compute:
-
-   *out* = *log(det(A))*
-
-If *n>2*, *logdet* is performed separately on the trailing two dimensions
-for all inputs (batch mode).
-
-.. note:: The operator supports float32 and float64 data types only.
-
-Examples::
-
-   Single matrix inversion
-   A = [[2., 3.], [1., 4.]]
-   logdet(A) = [1.609438]
-
-   Batch matrix inversion
-   A = [[[2., 3.], [1., 4.]],
-        [[1., 2.], [2., 4.]],
-        [[1., 2.], [4., 3.]]]
-   logdet(A) = [1.609438, -inf, nan]
-)code" ADD_FILELINE)
-.set_num_inputs(1)
-.set_num_outputs(3)
-.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs)
-  { return std::vector<std::string>{"A"}; })
-.set_attr<nnvm::FNumVisibleOutputs>("FNumVisibleOutputs", [](const NodeAttrs& attrs) {
-  return 1; })
-.set_attr<mxnet::FInferShape>("FInferShape", DetShape<1>)
-.set_attr<nnvm::FInferType>("FInferType", DetType<1>)
-.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs)
-  { return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; })
-.set_attr<FCompute>("FCompute", LaOpDetForward<cpu, 1, logdet>)
-.set_attr<nnvm::FGradient>("FGradient", ReduceDetGrad<1>{"_backward_linalg_logdet"})
-.add_argument("A", "NDArray-or-Symbol", "Tensor of square matrix");
-
-NNVM_REGISTER_OP(_backward_linalg_logdet)
-.set_num_inputs(4)
-.set_num_outputs(1)
-.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs)
-  { return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; })
-.set_attr<nnvm::TIsBackward>("TIsBackward", true)
-.set_attr<FCompute>("FCompute", LaOpDetBackward<cpu, 1, logdet_backward>);
-
 NNVM_REGISTER_OP(_linalg_slogdet)
 .add_alias("linalg_slogdet")
 .describe(R"code(Compute the sign and log of the determinant of a matrix.
diff --git a/src/operator/tensor/la_op.cu b/src/operator/tensor/la_op.cu
index 74fc97b75d6b..68c33180e3d5 100644
--- a/src/operator/tensor/la_op.cu
+++ b/src/operator/tensor/la_op.cu
@@ -105,12 +105,6 @@ NNVM_REGISTER_OP(_linalg_det)
 NNVM_REGISTER_OP(_backward_linalg_det)
 .set_attr<FCompute>("FCompute", LaOpDetBackward<gpu, 1, det_backward>);
 
-NNVM_REGISTER_OP(_linalg_logdet)
-.set_attr<FCompute>("FCompute", LaOpDetForward<gpu, 1, logdet>);
-
-NNVM_REGISTER_OP(_backward_linalg_logdet)
-.set_attr<FCompute>("FCompute", LaOpDetBackward<gpu, 1, logdet_backward>);
-
 NNVM_REGISTER_OP(_linalg_slogdet)
 .set_attr<FCompute>("FCompute", LaOpDetForward<gpu, 2, slogdet>);
 
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 764f0e59a6d7..3c59524b4fee 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -6536,11 +6536,6 @@ def test_laop_6():
     test_det = mx.sym.linalg.det(data)
     check_fw(test_det, [a], [r])
     check_grad(test_det, [a])
-    # logdet
-    r = np.log(np.linalg.det(a))
-    test_logdet = mx.sym.linalg.logdet(data)
-    check_fw(test_logdet, [a], [r])
-    check_grad(test_logdet, [a])
     # test slogdet
     r1 = np.array([1., 1., 1.])
     r2 = np.log(np.abs(np.linalg.det(a)))
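
The results documented for the removed operator can still be reproduced through the remaining `linalg.slogdet`, since log(det(A)) = log(sign) + log(abs(det(A))). Below is a minimal sketch, assuming the NDArray API exposes `mx.nd.linalg.slogdet` (the remaining test exercises the symbol-API counterpart `mx.sym.linalg.slogdet`); it is an illustration only, not part of the patch::

    import numpy as np
    import mxnet as mx

    # Same batch of 2x2 matrices as in the removed docstring example.
    A = mx.nd.array([[[2., 3.], [1., 4.]],
                     [[1., 2.], [2., 4.]],
                     [[1., 2.], [4., 3.]]])

    # slogdet returns (sign, log|det|); recombine as log(det) = log(sign) + log|det|.
    sign, logabsdet = mx.nd.linalg.slogdet(A)
    logdet = np.log(sign.asnumpy()) + logabsdet.asnumpy()

    print(logdet)  # expected: [1.609438, -inf, nan]

For a singular matrix the sign output is 0, so the recombination gives -inf; for a negative determinant log(sign) is nan, matching the `[1.609438, -inf, nan]` example in the removed docstring.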