Add loss_fn to IgniteMetric and rename to IgniteMetricHandler #6695

Merged (27 commits) on Jul 13, 2023
Changes from 6 commits
Commits
324e53f
Add DiceCEMetric
matt3o Jul 3, 2023
4384f4c
WiP: Add unittest for DiceCEMetric
matt3o Jul 3, 2023
94cb06f
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 3, 2023
48c6ef9
Remove DiceCEMetric
matt3o Jul 4, 2023
6815816
Add IgniteLossMetric
matt3o Jul 4, 2023
2d77048
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 4, 2023
84ff449
Merge branch 'dev' into Add_dice_ce_metric
matt3o Jul 10, 2023
2524e0d
Undo previous commits as discussed
matt3o Jul 10, 2023
7c57f00
Add loss_fn support to IgniteMetric
matt3o Jul 10, 2023
14b0748
Delete previously created files
matt3o Jul 10, 2023
a3154ee
Modify IgniteMetric to also support loss_fn
matt3o Jul 10, 2023
0392f60
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 10, 2023
a51bd4d
Add tests for IgniteMetric(Handler)
matt3o Jul 10, 2023
d76f0ed
Fix formatting
matt3o Jul 10, 2023
9d93967
Update test cases for IgniteMetric(Handler)
matt3o Jul 11, 2023
974a3eb
Rename IgniteMetric to IgniteMetricHandler
matt3o Jul 11, 2023
acceeab
Rename test_handler_ignite_metric_handler to test_handler_ignite_metric
matt3o Jul 11, 2023
e77e97b
Remove warning
matt3o Jul 11, 2023
9ef3dac
Fix ignite ImportError
matt3o Jul 11, 2023
1388b46
Fix typing
matt3o Jul 11, 2023
91f85ae
Add deprecation warning for IgniteMetric
matt3o Jul 11, 2023
2c5d188
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 11, 2023
c8027b2
Add test_handler_ignite_metric to the min_tests list
matt3o Jul 11, 2023
f54a1bf
Fix code formatting
matt3o Jul 12, 2023
966c99a
Fix code formatting and remove debug prints
matt3o Jul 12, 2023
1a4b133
Merge branch 'dev' into Add_dice_ce_metric
wyli Jul 12, 2023
f92824a
Remove commented code
matt3o Jul 13, 2023
1 change: 1 addition & 0 deletions monai/handlers/__init__.py
@@ -42,3 +42,4 @@
from .tensorboard_handlers import TensorBoardHandler, TensorBoardImageHandler, TensorBoardStatsHandler
from .utils import from_engine, ignore_data, stopping_fn_from_loss, stopping_fn_from_metric, write_metrics_reports
from .validation_handler import ValidationHandler
from .ignite_loss_metric import IgniteLossMetric
65 changes: 65 additions & 0 deletions monai/handlers/dice_ce_metric.py
@@ -0,0 +1,65 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from collections.abc import Callable

from monai.handlers.ignite_metric import IgniteMetric
from monai.utils import MetricReduction
from monai.metrics import LossMetric

from torch.nn.modules.loss import _Loss

class LossMetricIgnite(IgniteMetric):
"""
Computes the value of the given ``loss_fn`` as a metric from full size Tensors and collects the average over batch, class-channels, iterations.
"""

def __init__(
self,
loss_fn: _Loss,
reduction: MetricReduction | str = MetricReduction.MEAN,
output_transform: Callable = lambda x: x,
save_details: bool = True,
# *args,
# **kwargs
) -> None:
"""

Args:
loss_fn: a callable function that takes ``y_pred`` and optionally ``y`` as input (in the "batch-first" format),
returns a "batch-first" tensor of loss values.
reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
`engine.state` and `output_transform` inherit from the ignite concept:
https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
save_details: whether to save metric computation details per image, for example: mean dice of every image.
default to True, will save to `engine.state.metric_details` dict with the metric name as key.

See also:
:py:class:`monai.metrics.LossMetric`
"""
self.loss_fn = loss_fn
# loss_function = DiceCELoss(*args, **kwargs)
metric_fn = LossMetric(loss_fn=self.loss_fn, reduction=reduction, get_not_nans=False)
super().__init__(metric_fn=metric_fn, output_transform=output_transform, save_details=save_details)
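
For context (not part of the diff): the handler above is a thin wrapper around monai.metrics.LossMetric, which is what actually evaluates loss_fn. A minimal standalone sketch of that underlying metric, assuming the LossMetric and DiceCELoss APIs of the MONAI version this branch targets:

# Sketch only: LossMetric evaluates loss_fn per call, accumulates the values,
# and reduces them when aggregate() is called.
import torch

from monai.losses import DiceCELoss
from monai.metrics import LossMetric
from monai.utils import MetricReduction

loss_metric = LossMetric(loss_fn=DiceCELoss(softmax=True), reduction=MetricReduction.MEAN, get_not_nans=False)

y_pred = torch.rand(4, 2, 8, 8)  # batch-first logits: (batch, channel, H, W)
y = torch.zeros(4, 2, 8, 8)
y[:, 0] = 1.0                    # one-hot ground truth, all background, just to exercise the call

loss_metric(y_pred=y_pred, y=y)  # accumulate one iteration
print(loss_metric.aggregate())   # reduced loss over everything accumulated so far
loss_metric.reset()
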
65 changes: 65 additions & 0 deletions monai/handlers/ignite_loss_metric.py
@@ -0,0 +1,65 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from collections.abc import Callable

from monai.handlers.ignite_metric import IgniteMetric
from monai.utils import MetricReduction
from monai.metrics import LossMetric

from torch.nn.modules.loss import _Loss

class IgniteLossMetric(IgniteMetric):
"""
Computes the value of the given ``loss_fn`` as a metric from full size Tensors and collects the average over batch, class-channels, iterations.
"""

def __init__(
self,
loss_fn: _Loss,
reduction: MetricReduction | str = MetricReduction.MEAN,
output_transform: Callable = lambda x: x,
save_details: bool = True,
# *args,
# **kwargs
) -> None:
"""

Args:
loss_fn: a callable function that takes ``y_pred`` and optionally ``y`` as input (in the "batch-first" format),
returns a "batch-first" tensor of loss values.
reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
`engine.state` and `output_transform` inherit from the ignite concept:
https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
save_details: whether to save metric computation details per image, for example: mean dice of every image.
default to True, will save to `engine.state.metric_details` dict with the metric name as key.

See also:
:py:class:`monai.metrics.LossMetric`
"""
self.loss_fn = loss_fn
# loss_function = DiceCELoss(*args, **kwargs)
metric_fn = LossMetric(loss_fn=self.loss_fn, reduction=reduction, get_not_nans=False)
super().__init__(metric_fn=metric_fn, output_transform=output_transform, save_details=save_details)
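
As a usage illustration (again not part of the diff), the new handler can be attached to an ignite Engine like the existing MONAI metric handlers. The sketch below assumes the IgniteLossMetric export added to monai/handlers/__init__.py in this revision (the class is renamed later in the PR) and a recent pytorch-ignite:

# Sketch only: collect the DiceCE loss of each iteration's (pred, label) output
# as an epoch-level metric via the new handler.
import torch
from ignite.engine import Engine, Events

from monai.handlers import IgniteLossMetric, from_engine
from monai.losses import DiceCELoss

loss_handler = IgniteLossMetric(
    loss_fn=DiceCELoss(softmax=True),
    output_transform=from_engine(["pred", "label"]),  # extract (y_pred, y) from engine.state.output
    save_details=True,
)

def _val_step(engine, batch):
    pass  # the handler only reads engine.state.output, which is set manually below

engine = Engine(_val_step)
loss_handler.attach(engine=engine, name="dice_ce_loss")

# simulate one validation iteration with batch-first tensors
label = torch.zeros(2, 2, 8, 8)
label[:, 0] = 1.0  # one-hot labels, all background
engine.state.output = {"pred": torch.rand(2, 2, 8, 8), "label": label}
engine.fire_event(Events.ITERATION_COMPLETED)
engine.fire_event(Events.EPOCH_COMPLETED)

print(engine.state.metrics["dice_ce_loss"])         # reduced loss for the epoch
print(engine.state.metric_details["dice_ce_loss"])  # raw loss values kept because save_details=True
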
98 changes: 98 additions & 0 deletions tests/test_handler_dice_ce_metric.py
@@ -0,0 +1,98 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import unittest

import torch
from ignite.engine import Engine, Events
from parameterized import parameterized

from monai.handlers import DiceCEMetric, from_engine
from tests.utils import assert_allclose

TEST_CASE_1 = [{"include_background": True, "output_transform": from_engine(["pred", "label"])}, 0.813259, (4, 2)]
TEST_CASE_2 = [{"include_background": False, "output_transform": from_engine(["pred", "label"])}, 0.813259, (4, 1)]
TEST_CASE_3 = [
{"reduction": "mean_channel", "output_transform": from_engine(["pred", "label"])},
torch.Tensor([0.313262, 2.313251, 0.313262, 0.313262]),
(4, 2),
]


class TestHandlerDiceCEMetric(unittest.TestCase):
# TODO test multi node averaged dice

@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
def test_compute(self, input_params, expected_avg, details_shape):
dice_metric = DiceCEMetric(**input_params)

def _val_func(engine, batch):
pass

engine = Engine(_val_func)
dice_metric.attach(engine=engine, name="dice_ce_metric")
# test input a list of channel-first tensor
y_pred = [torch.Tensor([[0], [1]]), torch.Tensor([[1], [0]])]
y = torch.Tensor([[[0], [1]], [[0], [1]]])
engine.state.output = {"pred": y_pred, "label": y}
engine.fire_event(Events.ITERATION_COMPLETED)

y_pred = [torch.Tensor([[0], [1]]), torch.Tensor([[1], [0]])]
y = torch.Tensor([[[0], [1]], [[1], [0]]])
engine.state.output = {"pred": y_pred, "label": y}
engine.fire_event(Events.ITERATION_COMPLETED)

engine.fire_event(Events.EPOCH_COMPLETED)
assert_allclose(engine.state.metrics["dice_ce_metric"], expected_avg, atol=1e-4, rtol=1e-4, type_test=False)
self.assertTupleEqual(tuple(engine.state.metric_details["dice_ce_metric"].shape), details_shape)

@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_shape_mismatch(self, input_params, _expected_avg, _details_shape):
dice_metric = DiceCEMetric(**input_params)
with self.assertRaises((AssertionError, ValueError)):
y_pred = torch.Tensor([[0, 1], [1, 0]])
y = torch.ones((2, 3))
dice_metric.update([y_pred, y])

with self.assertRaises((AssertionError, ValueError)):
y_pred = torch.Tensor([[0, 1], [1, 0]])
y = torch.ones((3, 2))
dice_metric.update([y_pred, y])

# @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
# def test_compute_n_class(self, input_params, expected_avg, details_shape):
# dice_metric = DiceCEMetric(num_classes=2, **input_params)

# def _val_func(engine, batch):
# pass

# engine = Engine(_val_func)
# dice_metric.attach(engine=engine, name="dice_ce_metric")
# # test input a list of channel-first tensor
# y_pred = [torch.Tensor([[1]]), torch.Tensor([[0]])]
# y = torch.Tensor([[[0], [1]], [[0], [1]]])
# engine.state.output = {"pred": y_pred, "label": y}
# engine.fire_event(Events.ITERATION_COMPLETED)

# y_pred = [torch.Tensor([[1]]), torch.Tensor([[0]])] # class indices y_pred
# y = torch.Tensor([[[1]], [[0]]]) # class indices y
# engine.state.output = {"pred": y_pred, "label": y}
# engine.fire_event(Events.ITERATION_COMPLETED)

# engine.fire_event(Events.EPOCH_COMPLETED)
# assert_allclose(engine.state.metrics["dice_ce_metric"], expected_avg, atol=1e-4, rtol=1e-4, type_test=False)
# self.assertTupleEqual(tuple(engine.state.metric_details["dice_ce_metric"].shape), details_shape)


if __name__ == "__main__":
unittest.main()