Fix precision issue in TestClipIntensityPercentiles3D (#7808)
Fixes #7797

### Description
Ensure the expected values in the tests are computed with the same dtype as the transform output, to avoid precision issues.
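
For context, a minimal standalone sketch of the failure mode (plain NumPy/PyTorch with an arbitrary shape and seed; an illustration, not the MONAI code path itself):

```python
import numpy as np
import torch

# NumPy arrays default to float64, while the tensor path here works in
# float32; the two percentile thresholds then differ slightly.
im = np.random.default_rng(0).random((1, 32, 32, 32))  # float64

upper_f64 = np.percentile(im, 95)  # threshold from the raw float64 array

# threshold from a float32 tensor view of the same data
im_t = torch.as_tensor(im, dtype=torch.float32)
upper_f32 = torch.quantile(im_t.flatten(), 0.95)

# a difference near float32 epsilon can flip values at the clip boundary
print(abs(float(upper_f32) - upper_f64))
```

Computing both the expected values and the transform output from the same tensor-converted input removes this mismatch.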

### Types of changes
<!--- Put an `x` in all the boxes that apply, and remove the not
applicable items -->
- [x] Non-breaking change (fix or new feature that would not break
existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing
functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u
--net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick
--unittests --disttests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/`
folder.

---------

Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
KumoLiu and pre-commit-ci[bot] authored May 28, 2024
1 parent 94ab632 commit 762b525
Showing 2 changed files with 67 additions and 63 deletions.
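
Several comments in the diff below justify the `rtol=1e-4` tolerance: `soft_clip` relies on softplus, commonly evaluated as `logaddexp(0, x)`, and NumPy and PyTorch do not produce bit-identical float32 results for it. A minimal sketch of that discrepancy (a standalone illustration, not part of this commit):

```python
import numpy as np
import torch

x = np.linspace(-20.0, 20.0, 100_001, dtype=np.float32)

# softplus(x) = log(1 + exp(x)) = logaddexp(0, x) in both libraries
sp_np = np.logaddexp(np.float32(0.0), x)
sp_torch = torch.logaddexp(torch.zeros(1), torch.from_numpy(x))

# the worst-case difference sits near float32 rounding error, so exact
# comparison fails while a relative tolerance of 1e-4 passes comfortably
print(np.abs(sp_torch.numpy() - sp_np).max())
```
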
75 changes: 44 additions & 31 deletions tests/test_clip_intensity_percentiles.py
@@ -18,74 +18,92 @@
from monai.transforms import ClipIntensityPercentiles
from monai.transforms.utils import soft_clip
from monai.transforms.utils_pytorch_numpy_unification import clip, percentile
+from monai.utils.type_conversion import convert_to_tensor
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose


+def test_hard_clip_func(im, lower, upper):
+    im_t = convert_to_tensor(im)
+    if lower is None:
+        upper = percentile(im_t, upper)
+    elif upper is None:
+        lower = percentile(im_t, lower)
+    else:
+        lower, upper = percentile(im_t, (lower, upper))
+    return clip(im_t, lower, upper)
+
+
+def test_soft_clip_func(im, lower, upper):
+    im_t = convert_to_tensor(im)
+    if lower is None:
+        upper = percentile(im_t, upper)
+    elif upper is None:
+        lower = percentile(im_t, lower)
+    else:
+        lower, upper = percentile(im_t, (lower, upper))
+    return soft_clip(im_t, minv=lower, maxv=upper, sharpness_factor=1.0, dtype=torch.float32)
+

class TestClipIntensityPercentiles2D(NumpyImageTestCase2D):

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_hard_clipping_two_sided(self, p):
        hard_clipper = ClipIntensityPercentiles(upper=95, lower=5)
        im = p(self.imt)
        result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_hard_clipping_one_sided_high(self, p):
        hard_clipper = ClipIntensityPercentiles(upper=95, lower=None)
        im = p(self.imt)
        result = hard_clipper(im)
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_hard_clipping_one_sided_low(self, p):
        hard_clipper = ClipIntensityPercentiles(upper=None, lower=5)
        im = p(self.imt)
        result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_soft_clipping_two_sided(self, p):
        soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_soft_clipping_one_sided_high(self, p):
        soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper(im)
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_soft_clipping_one_sided_low(self, p):
        soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper(im)
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_channel_wise(self, p):
        clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True)
        im = p(self.imt)
        result = clipper(im)
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
            lower, upper = percentile(c, (5, 95))
            expected = clip(c, lower, upper)
            assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
@@ -118,35 +136,31 @@ def test_hard_clipping_two_sided(self, p):
        hard_clipper = ClipIntensityPercentiles(upper=95, lower=5)
        im = p(self.imt)
        result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_hard_clipping_one_sided_high(self, p):
        hard_clipper = ClipIntensityPercentiles(upper=95, lower=None)
        im = p(self.imt)
        result = hard_clipper(im)
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_hard_clipping_one_sided_low(self, p):
        hard_clipper = ClipIntensityPercentiles(upper=None, lower=5)
        im = p(self.imt)
        result = hard_clipper(im)
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_soft_clipping_two_sided(self, p):
        soft_clipper = ClipIntensityPercentiles(upper=95, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper(im)
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
+        expected = test_soft_clip_func(im, 5, 95)
        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

@@ -155,27 +169,26 @@ def test_soft_clipping_one_sided_high(self, p):
        soft_clipper = ClipIntensityPercentiles(upper=95, lower=None, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper(im)
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_soft_clipping_one_sided_low(self, p):
        soft_clipper = ClipIntensityPercentiles(upper=None, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper(im)
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result, p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
    def test_channel_wise(self, p):
        clipper = ClipIntensityPercentiles(upper=95, lower=5, channel_wise=True)
        im = p(self.imt)
        result = clipper(im)
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
            lower, upper = percentile(c, (5, 95))
            expected = clip(c, lower, upper)
            assert_allclose(result[i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
55 changes: 23 additions & 32 deletions tests/test_clip_intensity_percentilesd.py
@@ -13,14 +13,15 @@

import unittest

-import torch
from parameterized import parameterized

from monai.transforms import ClipIntensityPercentilesd
-from monai.transforms.utils import soft_clip
from monai.transforms.utils_pytorch_numpy_unification import clip, percentile
+from monai.utils.type_conversion import convert_to_tensor
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, NumpyImageTestCase3D, assert_allclose

+from .test_clip_intensity_percentiles import test_hard_clip_func, test_soft_clip_func
+

class TestClipIntensityPercentilesd2D(NumpyImageTestCase2D):

@@ -30,8 +31,7 @@ def test_hard_clipping_two_sided(self, p):
        hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5)
        im = p(self.imt)
        result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -40,8 +40,7 @@ def test_hard_clipping_one_sided_high(self, p):
        hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None)
        im = p(self.imt)
        result = hard_clipper({key: im})
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -50,8 +49,7 @@ def test_hard_clipping_one_sided_low(self, p):
        hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5)
        im = p(self.imt)
        result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -60,9 +58,8 @@ def test_soft_clipping_two_sided(self, p):
        soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -71,9 +68,8 @@ def test_soft_clipping_one_sided_high(self, p):
        soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper({key: im})
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -82,9 +78,8 @@ def test_soft_clipping_one_sided_low(self, p):
        soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper({key: im})
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
-        # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, 5, None)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -93,7 +88,8 @@ def test_channel_wise(self, p):
        clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True)
        im = p(self.imt)
        result = clipper({key: im})
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(self.imt)
+        for i, c in enumerate(im_t):
            lower, upper = percentile(c, (5, 95))
            expected = clip(c, lower, upper)
            assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-3, atol=0)
@@ -132,8 +128,7 @@ def test_hard_clipping_two_sided(self, p):
        hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5)
        im = p(self.imt)
        result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 95)
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -142,8 +137,7 @@ def test_hard_clipping_one_sided_high(self, p):
        hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None)
        im = p(self.imt)
        result = hard_clipper({key: im})
-        lower, upper = percentile(im, (0, 95))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 0, 95)
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -152,8 +146,7 @@ def test_hard_clipping_one_sided_low(self, p):
        hard_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5)
        im = p(self.imt)
        result = hard_clipper({key: im})
-        lower, upper = percentile(im, (5, 100))
-        expected = clip(im, lower, upper)
+        expected = test_hard_clip_func(im, 5, 100)
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -162,8 +155,7 @@ def test_soft_clipping_two_sided(self, p):
        soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper({key: im})
-        lower, upper = percentile(im, (5, 95))
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=upper, dtype=torch.float32)
+        expected = test_soft_clip_func(im, 5, 95)
        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@@ -173,9 +165,8 @@ def test_soft_clipping_one_sided_high(self, p):
        soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=None, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper({key: im})
-        upper = percentile(im, 95)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=None, maxv=upper, dtype=torch.float32)
-        # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable across torch and numpy
+        expected = test_soft_clip_func(im, None, 95)
+        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

    @parameterized.expand([[p] for p in TEST_NDARRAYS])
@@ -184,8 +175,7 @@ def test_soft_clipping_one_sided_low(self, p):
        soft_clipper = ClipIntensityPercentilesd(keys=[key], upper=None, lower=5, sharpness_factor=1.0)
        im = p(self.imt)
        result = soft_clipper({key: im})
-        lower = percentile(im, 5)
-        expected = soft_clip(im, sharpness_factor=1.0, minv=lower, maxv=None, dtype=torch.float32)
+        expected = test_soft_clip_func(im, 5, None)
        # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable across torch and numpy
        assert_allclose(result[key], p(expected), type_test="tensor", rtol=1e-4, atol=0)

@@ -195,7 +185,8 @@ def test_channel_wise(self, p):
        clipper = ClipIntensityPercentilesd(keys=[key], upper=95, lower=5, channel_wise=True)
        im = p(self.imt)
        result = clipper({key: im})
-        for i, c in enumerate(im):
+        im_t = convert_to_tensor(im)
+        for i, c in enumerate(im_t):
            lower, upper = percentile(c, (5, 95))
            expected = clip(c, lower, upper)
            assert_allclose(result[key][i], p(expected), type_test="tensor", rtol=1e-4, atol=0)
