
[Relay, TOPI] Add negative log likelihood loss (nll_loss) op #8056

Merged · 7 commits · Jun 25, 2021 · Changes from 6 commits
13 changes: 13 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -1426,6 +1426,19 @@ struct BatchToSpaceNDAttrs : public tvm::AttrsNode<BatchToSpaceNDAttrs> {
}
}; // struct BatchToSpaceNDAttrs

/*! \brief Attributes used in NLLLoss operator */
struct NLLLossAttrs : public tvm::AttrsNode<NLLLossAttrs> {
std::string reduction;
int ignore_index;

TVM_DECLARE_ATTRS(NLLLossAttrs, "relay.attrs.NLLLossAttrs") {
TVM_ATTR_FIELD(reduction).set_default("mean").describe(
"The reduction method to apply to the output. Can be"
"'none', 'mean' or 'sum'.");
TVM_ATTR_FIELD(ignore_index).describe("The target value to ignore.");
}
}; // struct NLLLossAttrs

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_NN_H_
48 changes: 48 additions & 0 deletions include/tvm/topi/nn.h
@@ -29,6 +29,7 @@
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>
#include <tvm/topi/detail/constant_utils.h>
#include <tvm/topi/reduction.h>
#include <tvm/topi/tags.h>
#include <tvm/topi/transform.h>

@@ -642,6 +643,53 @@ inline tvm::te::Tensor batch_to_space_nd(const tvm::te::Tensor& data,
out = strided_slice(out, begin_idx, end_idx, strides);
return out;
}

/*!
* \brief Negative log likelihood loss.
*
* \param predictions The prediction tensor.
* \param targets The target tensor.
* \param weights A manual rescaling weight given to each class.
* \param reduction The reduction method to apply to the output.
* \param ignore_index The target value to ignore.
* \param name The name of the operation.
* \param tag The tag to mark the operation.
*
* \return The negative log likelihood loss of the predictions and targets.
*/
inline Tensor nll_loss(const Tensor& predictions, const Tensor& targets, const Tensor& weights,
                       std::string reduction = "mean", int ignore_index = -100,
                       const std::string name = "nll_loss", const std::string tag = kBroadcast) {
Review thread (on the default `tag`):

altanh (Contributor): should the tag be kOpaque to match the Relay pattern?

zhuzilin (Author): @altanh I am confused by the tag in topi (the ones in topi/tags.h) and the OpPatternKind in relay/op_attr_types.h. It seems that they do not match. Could you tell me how they are used in tvm? Thank you~

altanh (Contributor): I see, that is confusing.. I'll get back to you on this soon.

altanh (Contributor), replying to the author's follow-up ("Could you take a look at the tests? And is there any update with the tag and OpPatternKind?"): I don't have much of an update for the tag; maybe you could try leaving it as an empty string?

Member: The tag here is topi-level; sometimes we use it to identify a specific compute operation during scheduling. Otherwise we can leave it empty.

auto T = tvm::te::compute(
targets->shape,
[&](const tvm::Array<tvm::tir::Var>& target_indices) {
auto c = targets(target_indices);
tvm::Array<tvm::PrimExpr> pred_indices;
pred_indices.push_back(target_indices[0]); // batch index
pred_indices.push_back(c); // class index
for (size_t i = 1; i < target_indices.size(); i++) {
pred_indices.push_back(target_indices[i]); // indices for multidimensional loss
}
return tvm::tir::Select(c != ignore_index, -predictions(pred_indices) * weights(c),
tvm::tir::make_const(predictions->dtype, 0));
},
name, tag);
if (reduction == "mean") {
auto W = tvm::te::compute(
targets->shape,
[&](const tvm::Array<tvm::tir::Var>& target_indices) {
auto c = targets(target_indices);
return tvm::tir::Select(c != ignore_index, weights(c),
tvm::tir::make_const(predictions->dtype, 0));
},
name, tag);
return topi::divide(topi::sum(T, {}), topi::sum(W, {}));
} else if (reduction == "sum") {
return topi::sum(T, {});
} else { // reduction == "none"
return T;
}
}
} // namespace topi
} // namespace tvm
#endif // TVM_TOPI_NN_H_
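
For intuition, here is a small numpy sketch (illustrative values, not part of the diff) that mirrors the compute above: the Select masking against ignore_index, the weights(c) scaling, and the "mean" reduction dividing by the summed weights rather than the batch size.

import numpy as np

# Log-probabilities, shape (N=2, C=3); values are illustrative.
predictions = np.log(np.array([[0.7, 0.2, 0.1],
                               [0.1, 0.8, 0.1]], dtype="float32"))
targets = np.array([0, 1], dtype="int32")
weights = np.ones(3, dtype="float32")

# T[n] = -predictions[n, targets[n]] * weights[targets[n]]  (the Select branch)
T = -predictions[np.arange(2), targets] * weights[targets]

# "mean" divides by the sum of the participating weights, not by N.
mean_loss = T.sum() / weights[targets].sum()
print(T, mean_loss)  # [0.357 0.223] 0.290 (approx)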
16 changes: 16 additions & 0 deletions python/tvm/relay/frontend/pytorch.py
@@ -2305,6 +2305,20 @@ def unique(self, inputs, input_types):
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
return (unique_sliced, indices)

def nll_loss(self, inputs, input_types):
assert len(inputs) == 5
[predictions, targets, weights, reduction, ignore_index] = inputs
num_class = self.infer_shape(predictions)[1]
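# PyTorch encodes `reduction` as an integer: 0 = "none", 1 = "mean", 2 = "sum".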
if reduction == 0:
reduction = "none"
elif reduction == 1:
reduction = "mean"
else:
reduction = "sum"
if weights is None:
weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])
return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)

# Operator mappings
def create_convert_map(self):
self.convert_map = {
Expand Down Expand Up @@ -2517,6 +2531,8 @@ def create_convert_map(self):
"aten::argsort": self.argsort,
"aten::sort": self.sort,
"aten::_unique2": self.unique,
"aten::nll_loss": self.nll_loss,
"aten::nll_loss2d": self.nll_loss,
}

def update_convert_map(self, custom_map):
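
As a usage sketch (assumed, not part of the diff), the new converter can be exercised by tracing a module that calls F.nll_loss and importing it into Relay; shapes and dtypes below are illustrative.

import torch
import torch.nn.functional as F
from tvm import relay

class Loss(torch.nn.Module):
    def forward(self, pred, target):
        return F.nll_loss(pred, target, reduction="mean", ignore_index=-100)

pred = torch.randn(4, 3).log_softmax(dim=1)
target = torch.randint(0, 3, (4,))
scripted = torch.jit.trace(Loss(), (pred, target))

# Tracing F.nll_loss produces aten::nll_loss, which now maps to nn.nll_loss.
mod, params = relay.frontend.from_pytorch(
    scripted, [("pred", ((4, 3), "float32")), ("target", ((4,), "int64"))]
)
print(mod)  # the body should contain a call to nn.nll_loss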
11 changes: 11 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
@@ -886,6 +886,17 @@ def compute_cross_entropy_with_logits(attrs, inputs, out_dtype):
reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE)


# nll_loss
@reg.register_compute("nn.nll_loss")
def compute_nll_loss(attrs, inputs, out_dtype):
predictions, targets, weights = inputs
return [topi.nn.nll_loss(predictions, targets, weights, attrs.reduction, attrs.ignore_index)]


reg.register_reduce_schedule("nn.nll_loss")
reg.register_pattern("nn.nll_loss", OpPattern.OUT_ELEMWISE_FUSABLE)


# depth_to_space
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype):
36 changes: 36 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -2973,6 +2973,42 @@ def cross_entropy_with_logits(predictions, targets):
return _make.cross_entropy_with_logits(predictions, targets)


def nll_loss(predictions, targets, weights, reduction="mean", ignore_index=-100):
"""Negative log likelihood loss.
zhuzilin marked this conversation as resolved.
Show resolved Hide resolved

output{n, i_1, i_2, ..., i_k} = -p * w
where t = target{n, i_1, i_2, ..., i_k}
p = predictions{n, t, i_1, i_2, i_k}
w = weights{n, i_1, i_2, ..., i_k} if t != ignore_index else 0

result = reduction(output)

Parameters
----------
predictions : tvm.relay.Expr
The predictions.

targets : tvm.relay.Expr
The target value of each prediction.

weights : tvm.relay.Expr
The weight of each target value.

Review thread (on `weights`):

altanh (Contributor): can we make weights optional, like PyTorch? weights=1 is a pretty common case I believe, and we could add a fast-path implementation that skips the scaling.

zhuzilin (Author): @altanh We can make weights an optional parameter. I wonder if there are any examples of a relay op with an optional tensor parameter that I can learn from. Also, how should we deal with the gradient of an optional parameter? BTW, is there any better way to mark a parameter as "no need for gradient" instead of returning a ones_like grad?

altanh (Contributor, May 24, 2021): Hmm, not sure, that's a good point. Let's just keep the weights for now. As for not needing a gradient, currently there is no other way than putting in some dummy value. It might make sense for us to introduce a stop_gradient dummy op which cuts the gradient computation off at undifferentiable arguments (this can be a future PR). Thanks!

reduction : string
The reduction method to apply to the output.
Possible values are "mean", "sum" and "none".

ignore_index : int
The target value to ignore.

Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.nll_loss(predictions, targets, weights, reduction, ignore_index)


def depth_to_space(data, block_size, layout="NCHW", mode="DCR"):
"""Convert channels into spatial blocks.

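
Returning to nll_loss, a minimal sketch (assumed usage) of building and evaluating the new Relay op directly; shapes and the llvm target are illustrative.

import numpy as np
import tvm
from tvm import relay

pred = relay.var("pred", shape=(4, 3), dtype="float32")
targ = relay.var("targ", shape=(4,), dtype="int32")
wgts = relay.var("wgts", shape=(3,), dtype="float32")
loss = relay.nn.nll_loss(pred, targ, wgts, reduction="mean", ignore_index=-100)
mod = tvm.IRModule.from_expr(relay.Function([pred, targ, wgts], loss))

p = np.log(np.random.rand(4, 3).astype("float32") + 1e-6)
t = np.random.randint(0, 3, size=(4,)).astype("int32")
w = np.ones((3,), dtype="float32")
out = relay.create_executor("graph", mod=mod, target="llvm").evaluate()(p, t, w)
print(out)  # a scalar, since reduction="mean"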
5 changes: 5 additions & 0 deletions python/tvm/relay/op/op_attrs.py
@@ -572,3 +572,8 @@ class ThreefryGenerateAttrs(Attrs):
@tvm._ffi.register_object("relay.attrs.UniformAttrs")
class UniformAttrs(Attrs):
"""Attributes used in UniformAttrs operators"""


@tvm._ffi.register_object("relay.attrs.NLLLossAttrs")
class NLLLossAttrs(Attrs):
"""Attributes for nn.nll_loss"""
1 change: 1 addition & 0 deletions python/tvm/topi/nn/__init__.py
@@ -49,3 +49,4 @@
from .space_to_depth import *
from .space_to_batch_nd import *
from .batch_to_space_nd import *
from .loss import *
60 changes: 60 additions & 0 deletions python/tvm/topi/nn/loss.py
@@ -0,0 +1,60 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Loss functions definitions."""
from __future__ import absolute_import
from . import cpp


def nll_loss(predictions, targets, weights, reduction, ignore_index):
"""Negative log likelihood loss on the input data.

output{n, i_1, i_2, ..., i_k} = -p * w
where t = target{n, i_1, i_2, ..., i_k}
p = predictions{n, t, i_1, i_2, ..., i_k}
w = weights{t} if t != ignore_index else 0

result = reduction(output)

Parameters
----------
predictions : tvm.te.Tensor
(k+2)-D with shape (N, C, d_1, d_2, ..., d_k),
where C is the number of target classes

targets : tvm.te.Tensor
(k+1)-D with shape (N, d_1, d_2, ..., d_k)
The target value of the input.

weights : tvm.te.Tensor
1-D with shape (C,)
The weight of each target value.

reduction : string
The reduction method to apply to the output.
Can be "mean", "sum" or "none".

ignore_index : int
The target value to ignore.

Returns
-------
output : tvm.te.Tensor
a scalar if the reduction type is "mean" or "sum",
otherwise the same shape as `targets`.
"""
return cpp.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
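
A minimal sketch (assumed usage; shapes illustrative) of lowering this TOPI op through TE on CPU:

import tvm
from tvm import te, topi

pred = te.placeholder((4, 3), name="predictions", dtype="float32")
targ = te.placeholder((4,), name="targets", dtype="int32")
wgts = te.placeholder((3,), name="weights", dtype="float32")

out = topi.nn.nll_loss(pred, targ, wgts, reduction="mean", ignore_index=-100)
s = te.create_schedule(out.op)
f = tvm.build(s, [pred, targ, wgts, out], target="llvm")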
1 change: 1 addition & 0 deletions python/tvm/topi/testing/__init__.py
@@ -69,3 +69,4 @@
from .matrix_set_diag import matrix_set_diag
from .space_to_batch_nd import space_to_batch_nd_python
from .batch_to_space_nd import batch_to_space_nd_python
from .nll_loss import nll_loss
72 changes: 72 additions & 0 deletions python/tvm/topi/testing/nll_loss.py
@@ -0,0 +1,72 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""NLLLoss in python"""
import numpy as np


def nll_loss(predictions, targets, weights, reduction="mean", ignore_index=-100):
"""nll_loss operator implemented in numpy.

output{n, i_1, i_2, ..., i_k} = -p * w
where t = target{n, i_1, i_2, ..., i_k}
p = predictions{n, t, i_1, i_2, ..., i_k}
w = weights{t} if t != ignore_index else 0

result = reduction(output)

Parameters
----------
predictions : numpy.ndarray
(k+2)-D with shape (N, C, d_1, d_2, ..., d_k),
where C is the number of target classes

targets : numpy.ndarray
(k+1)-D with shape (N, d_1, d_2, ..., d_k)
The target value of the input.

weights : numpy.ndarray
1-D with shape (C,)
The weight of each target value.

reduction : string
The reduction method to apply to the output.
Can be "mean", "sum" or "none".

ignore_index : int
The target value to ignore.

Returns
-------
output : numpy.ndarray
a scalar if the reduction type is "mean" or "sum",
otherwise the same shape as `targets`.
"""
res = np.zeros(targets.shape)
weight_sum = 0.0
for index in np.ndindex(targets.shape):
class_id = targets[index]
if class_id != ignore_index:
index_list = list(index)
pred_index = tuple(index_list[:1] + [class_id] + index_list[1:])
res[index] = -predictions[pred_index] * weights[class_id]
weight_sum += weights[class_id]
if reduction == "mean":
return np.sum(res) / weight_sum
if reduction == "sum":
return np.sum(res)
return res
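
A quick sanity check (not part of the PR) comparing this reference against PyTorch's F.nll_loss, whose semantics it mirrors:

import numpy as np
import torch
import torch.nn.functional as F

pred = np.log(np.random.rand(4, 3).astype("float32") + 1e-6)
targ = np.random.randint(0, 3, size=(4,)).astype("int64")
wgts = (np.random.rand(3) + 0.5).astype("float32")

ours = nll_loss(pred, targ, wgts, reduction="mean", ignore_index=-100)
ref = F.nll_loss(torch.from_numpy(pred), torch.from_numpy(targ),
                 torch.from_numpy(wgts), reduction="mean", ignore_index=-100)
np.testing.assert_allclose(ours, ref.numpy(), rtol=1e-5)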