From 33af45c23f8846bb22b5b7e512ae597e40c32a76 Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Tue, 12 Mar 2024 19:18:49 +0100 Subject: [PATCH 01/12] Implementation of intensity clipping transform: both hard clipping and soft clipping approaches Signed-off-by: Lucas Robinet --- docs/source/transforms.rst | 12 ++ monai/transforms/__init__.py | 4 + monai/transforms/intensity/array.py | 115 +++++++++++++++- monai/transforms/intensity/dictionary.py | 35 +++++ monai/transforms/utils.py | 37 +++++ .../utils_pytorch_numpy_unification.py | 15 +++ tests/test_clip_intensity_percentiles.py | 115 ++++++++++++++++ tests/test_clip_intensity_percentilesd.py | 127 ++++++++++++++++++ tests/test_soft_clip.py | 125 +++++++++++++++++ 9 files changed, 584 insertions(+), 1 deletion(-) create mode 100644 tests/test_clip_intensity_percentiles.py create mode 100644 tests/test_clip_intensity_percentilesd.py create mode 100644 tests/test_soft_clip.py diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 8990e7991d..4b3cacfb2d 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -309,6 +309,12 @@ Intensity :members: :special-members: __call__ +`ClipIntensityPercentiles` +"""""""""""""""""""""""""" +.. autoclass:: ClipIntensityPercentiles + :members: + :special-members: __call__ + `RandScaleIntensity` """""""""""""""""""" .. image:: https://raw.githubusercontent.com/Project-MONAI/DocImages/main/transforms/RandScaleIntensity.png @@ -1384,6 +1390,12 @@ Intensity (Dict) :members: :special-members: __call__ +`ClipIntensityPercentilesd` """"""""""""""""""""""""""" +.. autoclass:: ClipIntensityPercentilesd + :members: + :special-members: __call__ + `RandScaleIntensityd` """"""""""""""""""""" .. image:: https://raw.githubusercontent.com/Project-MONAI/DocImages/main/transforms/RandScaleIntensityd.png diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py index 2aa8fbf8a1..0a0eaae047 100644 --- a/monai/transforms/__init__.py +++ b/monai/transforms/__init__.py @@ -92,6 +92,7 @@ from .croppad.functional import crop_func, crop_or_pad_nd, pad_func, pad_nd from .intensity.array import ( AdjustContrast, + ClipIntensityPercentiles, ComputeHoVerMaps, DetectEnvelope, ForegroundMask, @@ -135,6 +136,9 @@ AdjustContrastd, AdjustContrastD, AdjustContrastDict, + ClipIntensityPercentilesd, + ClipIntensityPercentilesD, + ClipIntensityPercentilesDict, ComputeHoVerMapsd, ComputeHoVerMapsD, ComputeHoVerMapsDict, diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index a2f63a7482..470e080c65 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -30,7 +30,7 @@ from monai.data.utils import get_random_patch, get_valid_patch_size from monai.networks.layers import GaussianFilter, HilbertTransform, MedianFilter, SavitzkyGolayFilter from monai.transforms.transform import RandomizableTransform, Transform -from monai.transforms.utils import Fourier, equalize_hist, is_positive, rescale_array +from monai.transforms.utils import Fourier, equalize_hist, is_positive, rescale_array, soft_clip from monai.transforms.utils_pytorch_numpy_unification import clip, percentile, where from monai.utils.enums import TransformBackends from monai.utils.misc import ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple @@ -54,6 +54,7 @@ "NormalizeIntensity", "ThresholdIntensity", "ScaleIntensityRange", + "ClipIntensityPercentiles", "AdjustContrast", "RandAdjustContrast", "ScaleIntensityRangePercentiles", @@ -1007,6 +1008,118 
@@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: return ret +class ClipIntensityPercentiles(Transform): + """ + Clip the intensity values of the input image based on its intensity distribution. + If `sharpness_factor` is provided, the intensity values will be soft clipped according to + f(x) = x + (1/sharpness_factor)*softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)) + From https://medium.com/life-at-hopper/clip-it-clip-it-good-1f1bf711b291 + + Soft clipping preserves the order of the values and maintains the gradient everywhere. + For example: + + .. code-block:: python + :emphasize-lines: 11, 22 + + image = torch.Tensor( + [[[1, 2, 3, 4, 5], + [1, 2, 3, 4, 5], + [1, 2, 3, 4, 5], + [1, 2, 3, 4, 5], + [1, 2, 3, 4, 5], + [1, 2, 3, 4, 5]]]) + + # Hard clipping from lower and upper image intensity percentiles + hard_clipper = ClipIntensityPercentiles(30, 70) + print(hard_clipper(image)) + metatensor([[[2., 2., 3., 4., 4.], + [2., 2., 3., 4., 4.], + [2., 2., 3., 4., 4.], + [2., 2., 3., 4., 4.], + [2., 2., 3., 4., 4.], + [2., 2., 3., 4., 4.]]]) + + + # Soft clipping from lower and upper image intensity percentiles + soft_clipper = ClipIntensityPercentiles(30, 70, 10.) + print(soft_clipper(image)) + metatensor([[[2.0000, 2.0693, 3.0000, 3.9307, 4.0000], + [2.0000, 2.0693, 3.0000, 3.9307, 4.0000], + [2.0000, 2.0693, 3.0000, 3.9307, 4.0000], + [2.0000, 2.0693, 3.0000, 3.9307, 4.0000], + [2.0000, 2.0693, 3.0000, 3.9307, 4.0000], + [2.0000, 2.0693, 3.0000, 3.9307, 4.0000]]]) + + See Also: + + - :py:class:`monai.transforms.ScaleIntensityRangePercentiles` + """ + + backend = [TransformBackends.TORCH, TransformBackends.NUMPY] + + def __init__( + self, + lower: float | None, + upper: float | None, + sharpness_factor: float | None = None, + channel_wise: bool = False, + dtype: DtypeLike = np.float32, + ) -> None: + """ + Args: + lower: lower intensity percentile. + upper: upper intensity percentile. + sharpness_factor: if not None, the intensity values will be soft clipped according to + f(x) = x + (1/sharpness_factor)*softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)). + defaults to None. + channel_wise: if True, compute intensity percentile and normalize every channel separately. + default to False. + dtype: output data type, if None, same as input image. defaults to float32. 
+ """ + if lower is None and upper is None: + raise ValueError("lower or upper percentiles must be provided") + if lower is not None and (lower < 0.0 or lower > 100.0): + raise ValueError("Percentiles must be in the range [0, 100]") + if upper is not None and (upper < 0.0 or upper > 100.0): + raise ValueError("Percentiles must be in the range [0, 100]") + if upper is not None and lower is not None and upper < lower: + raise ValueError("upper must be greater than or equal to lower") + if sharpness_factor is not None and sharpness_factor <= 0: + raise ValueError("sharpness_factor must be greater than 0") + + self.lower = lower + self.upper = upper + self.sharpness_factor = sharpness_factor + self.channel_wise = channel_wise + self.dtype = dtype + + def _normalize(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + if self.sharpness_factor is not None: + lower_percentile = percentile(img, self.lower) if self.lower is not None else None + upper_percentile = percentile(img, self.upper) if self.upper is not None else None + img = soft_clip(img, self.sharpness_factor, lower_percentile, upper_percentile, self.dtype) + else: + lower_percentile = percentile(img, self.lower) if self.lower is not None else percentile(img, 0) + upper_percentile = percentile(img, self.upper) if self.upper is not None else percentile(img, 100) + img = clip(img, lower_percentile, upper_percentile) + + img = convert_to_tensor(img, track_meta=False) + return img + + def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + """ + Apply the transform to `img`. + """ + img = convert_to_tensor(img, track_meta=get_track_meta()) + img_t = convert_to_tensor(img, track_meta=False) + if self.channel_wise: + img_t = torch.stack([self._normalize(img=d) for d in img_t]) # type: ignore + else: + img_t = self._normalize(img=img_t) + + return convert_to_dst_type(img_t, dst=img)[0] + + class AdjustContrast(Transform): """ Changes image intensity with gamma transform. Each pixel/voxel intensity is updated as:: diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 7e93464e64..6b078a661e 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -26,6 +26,7 @@ from monai.data.meta_obj import get_track_meta from monai.transforms.intensity.array import ( AdjustContrast, + ClipIntensityPercentiles, ComputeHoVerMaps, ForegroundMask, GaussianSharpen, @@ -77,6 +78,7 @@ "NormalizeIntensityd", "ThresholdIntensityd", "ScaleIntensityRanged", + "ClipIntensityPercentilesd", "AdjustContrastd", "RandAdjustContrastd", "ScaleIntensityRangePercentilesd", @@ -122,6 +124,8 @@ "ThresholdIntensityDict", "ScaleIntensityRangeD", "ScaleIntensityRangeDict", + "ClipIntensityPercentilesD", + "ClipIntensityPercentilesDict", "AdjustContrastD", "AdjustContrastDict", "RandAdjustContrastD", @@ -886,6 +890,36 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N return d +class ClipIntensityPercentilesd(MapTransform): + """ + Dictionary-based wrapper of :py:class:`monai.transforms.ClipIntensityPercentiles`. + Clip the intensity values of input image to a specific range based on the intensity distribution of the input. 
+ If `sharpness_factor` is provided, the intensity values will be soft clipped according to + f(x) = x + (1/sharpness_factor) * softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)) + """ + + def __init__( + self, + keys: KeysCollection, + lower: float | None, + upper: float | None, + sharpness_factor: float | None = None, + channel_wise: bool = False, + dtype: DtypeLike = np.float32, + allow_missing_keys: bool = False, + ) -> None: + super().__init__(keys, allow_missing_keys) + self.scaler = ClipIntensityPercentiles( + lower=lower, upper=upper, sharpness_factor=sharpness_factor, channel_wise=channel_wise, dtype=dtype + ) + + def __call__(self, data: dict) -> dict: + d = dict(data) + for key in self.key_iterator(d): + d[key] = self.scaler(d[key]) + return d + + class AdjustContrastd(MapTransform): """ Dictionary-based wrapper of :py:class:`monai.transforms.AdjustContrast`. @@ -1928,6 +1962,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N NormalizeIntensityD = NormalizeIntensityDict = NormalizeIntensityd ThresholdIntensityD = ThresholdIntensityDict = ThresholdIntensityd ScaleIntensityRangeD = ScaleIntensityRangeDict = ScaleIntensityRanged +ClipIntensityPercentilesD = ClipIntensityPercentilesDict = ClipIntensityPercentilesd AdjustContrastD = AdjustContrastDict = AdjustContrastd RandAdjustContrastD = RandAdjustContrastDict = RandAdjustContrastd ScaleIntensityRangePercentilesD = ScaleIntensityRangePercentilesDict = ScaleIntensityRangePercentilesd diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index e282ecff24..c5744689a3 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -38,6 +38,7 @@ nonzero, ravel, searchsorted, + softplus, unique, unravel_index, where, @@ -131,9 +132,45 @@ "resolves_modes", "has_status_keys", "distance_transform_edt", + "soft_clip", ] +def soft_clip( + arr: NdarrayOrTensor, + sharpness_factor: float = 1.0, + minv: float | None = None, + maxv: float | None = None, + dtype: DtypeLike | torch.dtype = np.float32, +) -> NdarrayOrTensor: + """ + Apply soft clip to the input array or tensor. + The intensity values will be soft clipped according to + f(x) = x + (1/sharpness_factor)*softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)) + From https://medium.com/life-at-hopper/clip-it-clip-it-good-1f1bf711b291 + + To perform one-sided clipping, set either minv or maxv to None. + Args: + arr: input array to clip. + sharpness_factor: the sharpness of the soft clip function, default to 1. + minv: minimum value of target clipped array. + maxv: maximum value of target clipped array. + dtype: if not None, convert input array to dtype before computation. + + """ + + if dtype is not None: + arr, *_ = convert_data_type(arr, dtype=dtype) + + v = arr + if minv is not None: + v = v + softplus(-sharpness_factor * (arr - minv)) / sharpness_factor + if maxv is not None: + v = v - softplus(sharpness_factor * (arr - maxv)) / sharpness_factor + + return v + + def rand_choice(prob: float = 0.5) -> bool: """ Returns True if a randomly chosen number is less than or equal to `prob`, by default this is a 50/50 chance. 
diff --git a/monai/transforms/utils_pytorch_numpy_unification.py b/monai/transforms/utils_pytorch_numpy_unification.py index 0774d50314..020d99af16 100644 --- a/monai/transforms/utils_pytorch_numpy_unification.py +++ b/monai/transforms/utils_pytorch_numpy_unification.py @@ -52,9 +52,24 @@ "median", "mean", "std", + "softplus", ] +def softplus(x: NdarrayOrTensor) -> NdarrayOrTensor: + """stable softplus through `np.logaddexp` with equivalent implementation for torch. + + Args: + x: array/tensor. + + Returns: + Softplus of the input. + """ + if isinstance(x, np.ndarray): + return np.logaddexp(np.zeros_like(x), x) + return torch.logaddexp(torch.zeros_like(x), x) + + def allclose(a: NdarrayTensor, b: NdarrayOrTensor, rtol=1e-5, atol=1e-8, equal_nan=False) -> bool: """`np.allclose` with equivalent implementation for torch.""" b, *_ = convert_to_dst_type(b, a, wrap_sequence=True) diff --git a/tests/test_clip_intensity_percentiles.py b/tests/test_clip_intensity_percentiles.py new file mode 100644 index 0000000000..75b4b54483 --- /dev/null +++ b/tests/test_clip_intensity_percentiles.py @@ -0,0 +1,115 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import unittest + +import numpy as np +from parameterized import parameterized + +from monai.transforms import ClipIntensityPercentiles +from monai.transforms.utils import soft_clip +from monai.transforms.utils_pytorch_numpy_unification import clip +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose + + +class TestClipIntensityPercentiles(NumpyImageTestCase2D): + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=None) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + hard_clipper = ClipIntensityPercentiles(upper=None, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) 
+ expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper(im) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, lower, upper) + assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + def test_ill_sharpness_factor(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=0.0) + + def test_ill_lower_percentile(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=None, lower=-1) + + def test_ill_upper_percentile(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=101, lower=None) + + def test_ill_percentiles(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=95, lower=96) + + def test_ill_both_none(self): + with self.assertRaises(ValueError): + ClipIntensityPercentiles(upper=None, lower=None) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_clip_intensity_percentilesd.py b/tests/test_clip_intensity_percentilesd.py new file mode 100644 index 0000000000..21516a2c9c --- /dev/null +++ b/tests/test_clip_intensity_percentilesd.py @@ -0,0 +1,127 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + from __future__ import annotations + + import unittest + + import numpy as np + from parameterized import parameterized + + from monai.transforms import ClipIntensityPercentilesd + from monai.transforms.utils import soft_clip + from monai.transforms.utils_pytorch_numpy_unification import clip + from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose + + + class TestClipIntensityPercentilesd(NumpyImageTestCase2D): + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + 
key = "img" + clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper({key: im}) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, lower, upper) + assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + def test_ill_sharpness_factor(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=0.0) + + def test_ill_lower_percentile(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=None, lower=-1) + + def test_ill_upper_percentile(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=101, lower=None) + + def test_ill_percentiles(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=95, lower=96) + + def test_ill_both_none(self): + key = "img" + with self.assertRaises(ValueError): + ClipIntensityPercentilesd(keys=[key], upper=None, lower=None) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_soft_clip.py b/tests/test_soft_clip.py new file mode 100644 index 0000000000..de5122e982 --- /dev/null +++ b/tests/test_soft_clip.py @@ -0,0 +1,125 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import numpy as np +import torch +from parameterized import parameterized + +from monai.transforms.utils import soft_clip + +TEST_CASES = [ + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 10}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 7.9307, 8.0000]), + }, + ], + [ + {"minv": 2, "maxv": None, "sharpness_factor": 10}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]), + }, + ], + [ + {"minv": None, "maxv": 7, "sharpness_factor": 10}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([0.0000, 1.0000, 2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 6.9307, 7.0000, 7.0000]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 1.0}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.1266, 2.3124, 2.6907, 3.3065, 4.1088, 5.0000, 5.8912, 6.6935, 7.3093, 7.6877]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 3.0}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0008, 2.0162, 2.2310, 3.0162, 4.0008, 5.0000, 5.9992, 6.9838, 7.7690, 7.9838]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 5.0}, + { + "input": torch.arange(10).float(), + "clipped": torch.tensor([2.0000, 2.0013, 2.1386, 3.0013, 4.0000, 5.0000, 6.0000, 6.9987, 7.8614, 7.9987]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 10}, + { + "input": np.arange(10).astype(np.float32), + "clipped": np.array([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 7.9307, 8.0000]), + }, + ], + [ + {"minv": 2, "maxv": None, "sharpness_factor": 10}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.0000, 2.0000, 2.0693, 3.0000, 4.0000, 5.0000, 6.0000, 7.0000, 8.0000, 9.0000]), + }, + ], + [ + {"minv": None, "maxv": 7, "sharpness_factor": 10}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([0.0000, 1.0000, 2.0000, 3.0000, 4.0000, 5.0000, 6.0000, 6.9307, 7.0000, 7.0000]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 1.0}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.1266, 2.3124, 2.6907, 3.3065, 4.1088, 5.0000, 5.8912, 6.6935, 7.3093, 7.6877]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 3.0}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.0008, 2.0162, 2.2310, 3.0162, 4.0008, 5.0000, 5.9992, 6.9838, 7.7690, 7.9838]), + }, + ], + [ + {"minv": 2, "maxv": 8, "sharpness_factor": 5.0}, + { + "input": np.arange(10).astype(float), + "clipped": np.array([2.0000, 2.0013, 2.1386, 3.0013, 4.0000, 5.0000, 6.0000, 6.9987, 7.8614, 7.9987]), + }, + ], +] + + +class TestSoftClip(unittest.TestCase): + + @parameterized.expand(TEST_CASES) + def test_result(self, input_param, input_data): + outputs = soft_clip(input_data["input"], **input_param) + expected_val = input_data["clipped"] + if isinstance(outputs, torch.Tensor): + np.testing.assert_allclose( + outputs.detach().cpu().numpy(), expected_val.detach().cpu().numpy(), atol=1e-4, rtol=1e-4 + ) + else: + np.testing.assert_allclose(outputs, expected_val, atol=1e-4, rtol=1e-4) + + +if __name__ == "__main__": + unittest.main() From 8d77605f639e8b7e94105955dc024fb807909c4b Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Tue, 12 Mar 2024 20:02:00 +0100 Subject: [PATCH 02/12] correct soft_clip typing Signed-off-by: Lucas Robinet --- monai/transforms/utils.py | 4 
++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index c5744689a3..6c084953f1 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -139,8 +139,8 @@ def soft_clip( arr: NdarrayOrTensor, sharpness_factor: float = 1.0, - minv: float | None = None, - maxv: float | None = None, + minv: NdarrayOrTensor | float | int | None = None, + maxv: NdarrayOrTensor | float | int | None = None, dtype: DtypeLike | torch.dtype = np.float32, ) -> NdarrayOrTensor: """ From fc57eda846e36fb0853b4bcdfb420ce40d237f88 Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Fri, 29 Mar 2024 15:58:34 +0100 Subject: [PATCH 03/12] clarification on docstring, add 3d tests Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 19 ++++-- tests/test_clip_intensity_percentiles.py | 74 ++++++++++++++++++++- tests/test_clip_intensity_percentilesd.py | 80 ++++++++++++++++++++++- 3 files changed, 164 insertions(+), 9 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 13533d3b9b..21185d4247 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1063,17 +1063,25 @@ def __init__( upper: float | None, sharpness_factor: float | None = None, channel_wise: bool = False, + return_percentiles: bool = False, dtype: DtypeLike = np.float32, ) -> None: """ Args: - lower: lower intensity percentile. - upper: upper intensity percentile. + lower: lower intensity percentile. In the case of hard clipping, None will have the same effect as 0 by + not clipping the lowest input values. However, in the case of soft clipping, None and zero will have + two different effects: None will not apply clipping to low values, whereas zero will still transform + the lower values according to the soft clipping transformation. Please check for more details: + https://medium.com/life-at-hopper/clip-it-clip-it-good-1f1bf711b291. + upper: upper intensity percentile. The same as for lower, but this time applied to the highest values. With + soft clipping, None leaves the high values untouched, whereas 100 still passes them through the + corresponding clipping equation. sharpness_factor: if not None, the intensity values will be soft clipped according to f(x) = x + (1/sharpness_factor)*softplus(- c(x - minv)) - (1/sharpness_factor)*softplus(c(x - maxv)). defaults to None. channel_wise: if True, compute intensity percentile and normalize every channel separately. default to False. + return_percentiles: if True, return the intensity percentiles used for clipping. dtype: output data type, if None, same as input image. defaults to float32. 
""" if lower is None and upper is None: @@ -1091,9 +1099,10 @@ def __init__( self.upper = upper self.sharpness_factor = sharpness_factor self.channel_wise = channel_wise + self.return_percentiles = return_percentiles self.dtype = dtype - def _normalize(self, img: NdarrayOrTensor) -> NdarrayOrTensor: + def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: if self.sharpness_factor is not None: lower_percentile = percentile(img, self.lower) if self.lower is not None else None upper_percentile = percentile(img, self.upper) if self.upper is not None else None @@ -1113,9 +1122,9 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: img = convert_to_tensor(img, track_meta=get_track_meta()) img_t = convert_to_tensor(img, track_meta=False) if self.channel_wise: - img_t = torch.stack([self._normalize(img=d) for d in img_t]) # type: ignore + img_t = torch.stack([self._clip(img=d) for d in img_t]) # type: ignore else: - img_t = self._normalize(img=img_t) + img_t = self._clip(img=img_t) return convert_to_dst_type(img_t, dst=img)[0] diff --git a/tests/test_clip_intensity_percentiles.py b/tests/test_clip_intensity_percentiles.py index 75b4b54483..f5fe07a323 100644 --- a/tests/test_clip_intensity_percentiles.py +++ b/tests/test_clip_intensity_percentiles.py @@ -18,10 +18,10 @@ from monai.transforms import ClipIntensityPercentiles from monai.transforms.utils import soft_clip from monai.transforms.utils_pytorch_numpy_unification import clip -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose -class TestClipIntensityPercentiles(NumpyImageTestCase2D): +class TestClipIntensityPercentiles2D(NumpyImageTestCase2D): @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_hard_clipping_two_sided(self, p): @@ -111,5 +111,75 @@ def test_ill_both_none(self): ClipIntensityPercentiles(upper=None, lower=None) +class TestClipIntensityPercentiles3D(NumpyImageTestCase3D): + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + hard_clipper = ClipIntensityPercentiles(upper=95, lower=None) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + hard_clipper = ClipIntensityPercentiles(upper=None, lower=5) + im = p(self.imt) + result = hard_clipper(im) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 
because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper(im) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result, p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper(im) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, lower, upper) + assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + if __name__ == "__main__": unittest.main() diff --git a/tests/test_clip_intensity_percentilesd.py b/tests/test_clip_intensity_percentilesd.py index 21516a2c9c..193fa8b487 100644 --- a/tests/test_clip_intensity_percentilesd.py +++ b/tests/test_clip_intensity_percentilesd.py @@ -19,10 +19,10 @@ from monai.transforms import ClipIntensityPercentilesd from monai.transforms.utils import soft_clip from monai.transforms.utils_pytorch_numpy_unification import clip -from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose +from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose -class TestClipIntensityPercentilesd(NumpyImageTestCase2D): +class TestClipIntensityPercentilesd2D(NumpyImageTestCase2D): @parameterized.expand([[p] for p in TEST_NDARRAYS]) def test_hard_clipping_two_sided(self, p): key = "img" @@ -123,5 +123,81 @@ def test_ill_both_none(self): ClipIntensityPercentilesd(keys=[key], upper=None, lower=None) + +class TestClipIntensityPercentilesd3D(NumpyImageTestCase3D): + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_two_sided(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_high(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (0, 95)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + 
@parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_hard_clipping_one_sided_low(self, p): + key = "img" + hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5) + im = p(self.imt) + result = hard_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 100)) + expected = clip(self.imt, lower, upper) + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_two_sided(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower, upper = np.percentile(self.imt, (5, 95)) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=upper) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_high(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + upper = np.percentile(self.imt, 95) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=None, maxv=upper) + # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=5e-5, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_soft_clipping_one_sided_low(self, p): + key = "img" + soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0) + im = p(self.imt) + result = soft_clipper({key: im}) + lower = np.percentile(self.imt, 5) + expected = soft_clip(self.imt, sharpness_factor=1.0, minv=lower, maxv=None) + # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy + assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-6, atol=0) + + @parameterized.expand([[p] for p in TEST_NDARRAYS]) + def test_channel_wise(self, p): + key = "img" + clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True) + im = p(self.imt) + result = clipper({key: im}) + for i, c in enumerate(self.imt): + lower, upper = np.percentile(c, (5, 95)) + expected = clip(c, lower, upper) + assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-7, atol=0) + + if __name__ == "__main__": unittest.main() From 800a0518eff3a531cdc7051ff0e9328009f2eab0 Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Sat, 30 Mar 2024 16:22:25 +0100 Subject: [PATCH 04/12] fixing typo in ClipIntensityPercentile argument Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 21185d4247..0fac4afe9c 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1063,7 +1063,6 @@ def __init__( upper: float | None, sharpness_factor: float | None = None, channel_wise: bool = False, - return_percentiles: bool = False, dtype: DtypeLike = np.float32, ) -> None: """ @@ -1081,7 +1080,6 @@ def __init__( defaults to None. channel_wise: if True, compute intensity percentile and normalize every channel separately. default to False. 
- return_percentiles: if True, return the intensity percentiles used for clipping. dtype: output data type, if None, same as input image. defaults to float32. """ if lower is None and upper is None: @@ -1099,7 +1097,6 @@ def __init__( self.upper = upper self.sharpness_factor = sharpness_factor self.channel_wise = channel_wise - self.return_percentiles = return_percentiles self.dtype = dtype def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: From b82ec99fd0f13c17cc59b21bdc3ff7eea2adc016 Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Tue, 2 Apr 2024 09:48:58 +0200 Subject: [PATCH 05/12] Adding possibility to return percentiles in tensor metainfo Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 0fac4afe9c..269beacb48 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1063,6 +1063,7 @@ def __init__( upper: float | None, sharpness_factor: float | None = None, channel_wise: bool = False, + return_percentiles: bool = False, dtype: DtypeLike = np.float32, ) -> None: """ @@ -1080,6 +1081,9 @@ def __init__( defaults to None. channel_wise: if True, compute intensity percentile and normalize every channel separately. default to False. + return_percentiles: whether to return the calculated percentiles in tensor meta information, + if soft clipping and percentile is None, return None as the corresponding percentile in meta information. + defaults to False. dtype: output data type, if None, same as input image. defaults to float32. """ if lower is None and upper is None: @@ -1097,6 +1101,9 @@ def __init__( self.upper = upper self.sharpness_factor = sharpness_factor self.channel_wise = channel_wise + if return_percentiles: + self.percentiles = [] + self.return_percentiles = return_percentiles self.dtype = dtype def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: @@ -1109,6 +1116,8 @@ def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: upper_percentile = percentile(img, self.upper) if self.upper is not None else percentile(img, 100) img = clip(img, lower_percentile, upper_percentile) + if self.return_percentiles: + self.percentiles.append((lower_percentile, upper_percentile)) img = convert_to_tensor(img, track_meta=False) return img @@ -1123,7 +1132,11 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: else: img_t = self._clip(img=img_t) - return convert_to_dst_type(img_t, dst=img)[0] + img = convert_to_dst_type(img_t, dst=img)[0] + if self.return_percentiles: + img.meta['percentiles'] = self.percentiles + + return img class AdjustContrast(Transform): From 37b38a3ef6fff6d09c18dfb04d61492abb841529 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 07:49:58 +0000 Subject: [PATCH 06/12] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/transforms/intensity/array.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 269beacb48..c4097f970c 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1081,8 +1081,8 @@ def __init__( defaults to None. channel_wise: if True, compute intensity percentile and normalize every channel separately. default to False. 
- return_percentiles: whether to return the calculated percentiles in tensor meta information, - if soft clipping and percentile is None, return None as the corresponding percentile in meta information. + return_percentiles: whether to return the calculated percentiles in tensor meta information, + if soft clipping and percentile is None, return None as the corresponding percentile in meta information. defaults to False. dtype: output data type, if None, same as input image. defaults to float32. """ @@ -1135,7 +1135,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: img = convert_to_dst_type(img_t, dst=img)[0] if self.return_percentiles: img.meta['percentiles'] = self.percentiles - + return img From 3468369cbab311d0263abde1fd908be6def3c670 Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Tue, 2 Apr 2024 10:08:32 +0200 Subject: [PATCH 07/12] fixing flake8 linting error Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 269beacb48..1313bc2a40 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1081,8 +1081,8 @@ def __init__( defaults to None. channel_wise: if True, compute intensity percentile and normalize every channel separately. default to False. - return_percentiles: whether to return the calculated percentiles in tensor meta information, - if soft clipping and percentile is None, return None as the corresponding percentile in meta information. + return_percentiles: whether to return the calculated percentiles in tensor meta information, + if soft clipping and percentile is None, return None as the corresponding percentile in meta information. defaults to False. dtype: output data type, if None, same as input image. defaults to float32. 
""" @@ -1134,8 +1134,8 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: img = convert_to_dst_type(img_t, dst=img)[0] if self.return_percentiles: - img.meta['percentiles'] = self.percentiles - + img.meta["percentiles"] = self.percentiles + return img From d75e32a1dc5d5deaf73788070e6a968495d218b5 Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Thu, 4 Apr 2024 09:49:21 +0200 Subject: [PATCH 08/12] changing percentiles meta info to clipping values and fixing mypy errors Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index cc21a4710c..4a3acdf4d0 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1104,7 +1104,7 @@ def __init__( self.sharpness_factor = sharpness_factor self.channel_wise = channel_wise if return_percentiles: - self.percentiles = [] + self.clipping_values: Any = [] self.return_percentiles = return_percentiles self.dtype = dtype @@ -1119,7 +1119,7 @@ def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: img = clip(img, lower_percentile, upper_percentile) if self.return_percentiles: - self.percentiles.append((lower_percentile, upper_percentile)) + self.clipping_values.append((lower_percentile, upper_percentile)) img = convert_to_tensor(img, track_meta=False) return img @@ -1136,7 +1136,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: img = convert_to_dst_type(img_t, dst=img)[0] if self.return_percentiles: - img.meta["percentiles"] = self.percentiles + img.meta["clipping_values"] = self.clipping_values # type: ignore return img From 6dacecef23645944d57acd5201212fee44a297dc Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Thu, 4 Apr 2024 10:03:53 +0200 Subject: [PATCH 09/12] fixing typo with ./runtests.sh --autofix Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 4a3acdf4d0..f7075a1bf4 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1136,7 +1136,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: img = convert_to_dst_type(img_t, dst=img)[0] if self.return_percentiles: - img.meta["clipping_values"] = self.clipping_values # type: ignore + img.meta["clipping_values"] = self.clipping_values # type: ignore return img From 6d5187c6c7beadae9f9407dbf8db7891a3118256 Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Thu, 4 Apr 2024 20:03:41 +0200 Subject: [PATCH 10/12] typing correction, docstring clarification, and change in attribute names for ClipIntensityPercentiles class Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 30 ++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index f7075a1bf4..2ee1c70679 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -17,7 +17,7 @@ from abc import abstractmethod from collections.abc import Callable, Iterable, Sequence from functools import partial -from typing import Any +from typing import Any, List, Optional, Tuple from warnings import warn import numpy as np @@ -1063,7 +1063,7 @@ def __init__( upper: float | None, sharpness_factor: float | None = None, channel_wise: bool = False, - return_percentiles: 
bool = False, + return_clipping_values: bool = False, dtype: DtypeLike = np.float32, ) -> None: """ @@ -1081,11 +1081,10 @@ def __init__( defaults to None. channel_wise: if True, compute intensity percentile and normalize every channel separately. default to False. - return_percentiles: whether to return the calculated percentiles in tensor meta information, - if soft clipping and percentile is None, return None as the corresponding percentile in meta information. - return_percentiles: whether to return the calculated percentiles in tensor meta information, - if soft clipping and percentile is None, return None as the corresponding percentile in meta information. - defaults to False. + return_clipping_values: whether to return the calculated percentiles in tensor meta information. + If soft clipping and requested percentile is None, return None as the corresponding clipping + values in meta information. Clipping values are stored in a list with each element corresponding + to a channel if channel_wise is set to True. defaults to False. dtype: output data type, if None, same as input image. defaults to float32. """ if lower is None and upper is None: @@ -1103,9 +1102,9 @@ def __init__( self.upper = upper self.sharpness_factor = sharpness_factor self.channel_wise = channel_wise - if return_percentiles: - self.clipping_values: Any = [] - self.return_percentiles = return_percentiles + if return_clipping_values: + self.clipping_values: List[Tuple[Optional[float], Optional[float]]] = [] + self.return_clipping_values = return_clipping_values self.dtype = dtype def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: @@ -1118,8 +1117,13 @@ def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: upper_percentile = percentile(img, self.upper) if self.upper is not None else percentile(img, 100) img = clip(img, lower_percentile, upper_percentile) - if self.return_percentiles: - self.clipping_values.append((lower_percentile, upper_percentile)) + if self.return_clipping_values: + self.clipping_values.append( + ( + lower_percentile.item() if lower_percentile else None, + upper_percentile.item() if upper_percentile else None, + ) + ) img = convert_to_tensor(img, track_meta=False) return img @@ -1135,7 +1139,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor: img_t = self._clip(img=img_t) img = convert_to_dst_type(img_t, dst=img)[0] - if self.return_percentiles: + if self.return_clipping_values: img.meta["clipping_values"] = self.clipping_values # type: ignore return img From 6eb1eb8a8ec29d1d79d1ed193f57157dba45f27f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 18:05:34 +0000 Subject: [PATCH 11/12] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/transforms/intensity/array.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 2ee1c70679..81d6965b99 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -17,7 +17,7 @@ from abc import abstractmethod from collections.abc import Callable, Iterable, Sequence from functools import partial -from typing import Any, List, Optional, Tuple +from typing import Any from warnings import warn import numpy as np @@ -1103,7 +1103,7 @@ def __init__( self.sharpness_factor = sharpness_factor self.channel_wise = channel_wise if return_clipping_values: - self.clipping_values: 
List[Tuple[Optional[float], Optional[float]]] = [] + self.clipping_values: list[tuple[float | None, float | None]] = [] self.return_clipping_values = return_clipping_values self.dtype = dtype From 89a4f5fc4c3f9fe227850172720fcb62886dbefe Mon Sep 17 00:00:00 2001 From: Lucas Robinet Date: Fri, 5 Apr 2024 09:15:27 +0200 Subject: [PATCH 12/12] mypy compliant and dealing with potential float or int in clipping values Signed-off-by: Lucas Robinet --- monai/transforms/intensity/array.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 2ee1c70679..386dd9b1fc 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1120,8 +1120,16 @@ def _clip(self, img: NdarrayOrTensor) -> NdarrayOrTensor: if self.return_clipping_values: self.clipping_values.append( ( - lower_percentile.item() if lower_percentile else None, - upper_percentile.item() if upper_percentile else None, + ( + lower_percentile + if lower_percentile is None + else lower_percentile.item() if hasattr(lower_percentile, "item") else lower_percentile + ), + ( + upper_percentile + if upper_percentile is None + else upper_percentile.item() if hasattr(upper_percentile, "item") else upper_percentile + ), ) ) img = convert_to_tensor(img, track_meta=False)
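
Usage sketch for the API introduced by this series, assuming a MONAI build with all twelve patches applied. The toy tensor, the dictionary key, and the printed comment below are illustrative assumptions, not part of the patches:

import torch

from monai.transforms import ClipIntensityPercentiles, ClipIntensityPercentilesd
from monai.transforms.utils import soft_clip

# assumed toy single-channel, channel-first image with values 0..23
img = torch.arange(24, dtype=torch.float32).reshape(1, 4, 6)

# hard clipping to the [5th, 95th] intensity percentile range
hard = ClipIntensityPercentiles(lower=5, upper=95)
out = hard(img)

# soft clipping: order-preserving and differentiable everywhere
soft = ClipIntensityPercentiles(lower=5, upper=95, sharpness_factor=1.0)
out = soft(img)

# record the computed clipping values in the output meta information (argument name as of PATCH 10/12)
clipper = ClipIntensityPercentiles(lower=5, upper=95, return_clipping_values=True)
out = clipper(img)
print(out.meta["clipping_values"])  # one (lower_value, upper_value) tuple per call

# dictionary-based variant for use in Compose pipelines; percentiles computed per channel
clipd = ClipIntensityPercentilesd(keys="img", lower=5, upper=95, channel_wise=True)
sample = clipd({"img": img})

# the underlying utility can also be called directly, e.g. for one-sided soft clipping
vals = soft_clip(torch.arange(10).float(), sharpness_factor=10, minv=2, maxv=None)

Note that `clipping_values` lives on the transform instance and accumulates one tuple per call (one per channel when `channel_wise=True`), which is why the series surfaces it through the output tensor's meta information rather than returning it directly.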