Add utils for vista3d #7999

Merged
Changes from 20 commits
Commits (31)
3f91409
update mlp block
yiheng-wang-nv Aug 6, 2024
d241db5
add mypy fix
yiheng-wang-nv Aug 6, 2024
1af03f6
remove gelu approximate
yiheng-wang-nv Aug 7, 2024
ac26c78
Merge branch 'dev' into enhance-mlpblock
yiheng-wang-nv Aug 7, 2024
4dc89e5
free space
KumoLiu Aug 7, 2024
b5400b9
ignore test case type annotation
yiheng-wang-nv Aug 7, 2024
906fd16
Merge branch 'enhance-mlpblock' of github.com:yiheng-wang-nv/MONAI in…
yiheng-wang-nv Aug 7, 2024
c53f038
try to fix
KumoLiu Aug 7, 2024
90c7888
Add utils for vista3d
yiheng-wang-nv Aug 7, 2024
12a1e66
add convert_points_to_disc only and fix type issue
yiheng-wang-nv Aug 7, 2024
0af343d
Merge remote-tracking branch 'origin/enhance-mlpblock' into add-utils…
yiheng-wang-nv Aug 7, 2024
87d7363
Merge branch 'dev' into add-utils-for-vista3d
yiheng-wang-nv Aug 7, 2024
681e875
Update monai/transforms/utils.py
yiheng-wang-nv Aug 8, 2024
e069552
update more functions and change morphological path
yiheng-wang-nv Aug 8, 2024
32f37f7
Merge branch 'add-utils-for-vista3d' of github.com:yiheng-wang-nv/MON…
yiheng-wang-nv Aug 8, 2024
dc004cb
add get_largest_connected_component_mask_point
yiheng-wang-nv Aug 8, 2024
503bdd7
add tests
yiheng-wang-nv Aug 8, 2024
e6991fa
Merge branch 'dev' into add-utils-for-vista3d
yiheng-wang-nv Aug 8, 2024
f34bc42
adjust tests
yiheng-wang-nv Aug 8, 2024
53287bb
Merge branch 'add-utils-for-vista3d' of github.com:yiheng-wang-nv/MON…
yiheng-wang-nv Aug 8, 2024
496e0a9
update doc
yiheng-wang-nv Aug 8, 2024
4a8489a
add tests
yiheng-wang-nv Aug 9, 2024
7ea02e5
Merge branch 'dev' into add-utils-for-vista3d
yiheng-wang-nv Aug 9, 2024
021466f
Merge branch 'dev' into add-utils-for-vista3d
KumoLiu Aug 9, 2024
06a0b84
Update monai/transforms/utils.py
yiheng-wang-nv Aug 9, 2024
b84302e
skip if quick
yiheng-wang-nv Aug 9, 2024
cacbe6c
use to device
yiheng-wang-nv Aug 9, 2024
901ca09
Merge branch 'dev' into add-utils-for-vista3d
yiheng-wang-nv Aug 9, 2024
2ac7917
Merge branch 'dev' into add-utils-for-vista3d
KumoLiu Aug 9, 2024
7d15188
add .float before round
yiheng-wang-nv Aug 9, 2024
ffe2bac
Merge branch 'add-utils-for-vista3d' of github.com:yiheng-wang-nv/MON…
yiheng-wang-nv Aug 9, 2024
1 change: 1 addition & 0 deletions monai/transforms/__init__.py
@@ -688,6 +688,7 @@
weighted_patch_samples,
zero_margins,
)
from .utils_morphological_ops import dilate, erode
from .utils_pytorch_numpy_unification import (
allclose,
any_np_pt,
171 changes: 171 additions & 0 deletions monai/transforms/utils.py
@@ -22,6 +22,7 @@

import numpy as np
import torch
from torch import Tensor

import monai
from monai.config import DtypeLike, IndexSelection
@@ -30,6 +31,7 @@
from monai.networks.utils import meshgrid_ij
from monai.transforms.compose import Compose
from monai.transforms.transform import MapTransform, Transform, apply_transform
from monai.transforms.utils_morphological_ops import erode
from monai.transforms.utils_pytorch_numpy_unification import (
any_np_pt,
ascontiguousarray,
@@ -65,6 +67,8 @@
min_version,
optional_import,
pytorch_after,
unsqueeze_left,
unsqueeze_right,
)
from monai.utils.enums import TransformBackends
from monai.utils.type_conversion import (
@@ -103,6 +107,8 @@
"generate_spatial_bounding_box",
"get_extreme_points",
"get_largest_connected_component_mask",
"get_largest_connected_component_mask_point",
"convert_points_to_disc",
"remove_small_objects",
"img_bounds",
"in_bounds",
@@ -1172,6 +1178,171 @@ def get_largest_connected_component_mask(
return convert_to_dst_type(out, dst=img, dtype=out.dtype)[0]


def get_largest_connected_component_mask_point(
img_pos: Tensor,
img_neg: Tensor,
point_coords: Tensor,
point_labels: Tensor,
pos_val: Sequence[int] = (1, 3),
neg_val: Sequence[int] = (0, 2),
margins: int = 3,
) -> Tensor:
"""
Gets the largest connected component mask of an image that includes the point_coords.
# TODO: need author to provide more details about this function. Especially about each argument.
Args:
img_pos: [1, 1, H, W, D]
img_neg: [1, 1, H, W, D]

point_coords: [1, N, 3]
point_labels: [1, N]
"""
cucim_skimage, has_cucim = optional_import("cucim.skimage")

use_cp = has_cp and has_cucim and img_pos.device != torch.device("cpu")
if use_cp:
img_pos_ = convert_to_cupy(img_pos.short()) # type: ignore
img_neg_ = convert_to_cupy(img_neg.short()) # type: ignore
label = cucim_skimage.measure.label
lib = cp
else:
raise RuntimeError("Cucim.skimage and GPU device are required.")

features_pos, _ = label(img_pos_, connectivity=3, return_num=True)
features_neg, _ = label(img_neg_, connectivity=3, return_num=True)

pos_val = list(pos_val)
neg_val = list(neg_val)

outs = np.zeros_like(img_pos_)
for bs in range(point_coords.shape[0]):
for i, p in enumerate(point_coords[bs]):
if point_labels[bs, i] in pos_val:
features = features_pos
elif point_labels[bs, i] in neg_val:
features = features_neg
else:
# if -1 padding point, skip
continue
for margin in range(margins):
x, y, z = p.round().int().tolist()
l, r = max(x - margin, 0), min(x + margin + 1, features.shape[-3])
t, d = max(y - margin, 0), min(y + margin + 1, features.shape[-2])
f, b = max(z - margin, 0), min(z + margin + 1, features.shape[-1])
if (features[bs, 0, l:r, t:d, f:b] > 0).any():
index = features[bs, 0, l:r, t:d, f:b].max()
outs[[bs]] += lib.isin(features[[bs]], index)
break
outs[outs > 1] = 1
return convert_to_dst_type(outs, dst=img_pos, dtype=outs.dtype)[0]
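A minimal usage sketch (editor's illustration, not part of this PR): it assumes CuPy, cuCIM and a CUDA device are available, which the function above requires.

import torch

from monai.transforms.utils import get_largest_connected_component_mask_point

if torch.cuda.is_available():
    shape = (1, 1, 64, 64, 64)
    img_pos = torch.randint(0, 2, shape).cuda()  # candidate positive (foreground) mask
    img_neg = torch.randint(0, 2, shape).cuda()  # candidate negative (background) mask
    point_coords = torch.tensor([[[32.0, 32.0, 32.0]]]).cuda()  # [1, N=1, 3] click location
    point_labels = torch.tensor([[1]]).cuda()  # 1/3 = positive, 0/2 = negative, -1 = padding
    mask = get_largest_connected_component_mask_point(img_pos, img_neg, point_coords, point_labels)
    print(mask.shape)  # torch.Size([1, 1, 64, 64, 64])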


def convert_points_to_disc(
image_size: Sequence[int], point: Tensor, point_label: Tensor, radius: int = 2, disc: bool = False
):
"""
Convert 3D point coordinates into an image mask. The returned mask has the same spatial
size as `image_size`, while its batch dimension matches the `point` batch dimension.
Each point is converted to a ball with the radius defined by `radius`. The output
contains two channels: the first for negative points and the second for positive points.

Args:
image_size: The output size of the converted mask. It should be a 3D tuple.
point: [B, N, 3], 3D point coordinates.
point_label: [B, N], 0 or 2 means negative points, 1 or 3 means positive points.
radius: disc ball radius size.
disc: If True, use a regular disc; otherwise use a gaussian.
"""
masks = torch.zeros([point.shape[0], 2, image_size[0], image_size[1], image_size[2]], device=point.device)
_array = [
torch.arange(start=0, end=image_size[i], step=1, dtype=torch.float32, device=point.device) for i in range(3)
]
coord_rows, coord_cols, coord_z = torch.meshgrid(_array[0], _array[1], _array[2])
# [3, h, w, d] -> [b, 2, 3, h, w, d]
coords = unsqueeze_left(torch.stack((coord_rows, coord_cols, coord_z), dim=0), 6)
coords = coords.repeat(point.shape[0], 2, 1, 1, 1, 1)
for b, n in np.ndindex(*point.shape[:2]):
point_bn = unsqueeze_right(point[b, n], 4)  # [3] -> [3, 1, 1, 1], broadcastable against coords[b, channel]
if point_label[b, n] > -1:
channel = 0 if (point_label[b, n] == 0 or point_label[b, n] == 2) else 1
pow_diff = torch.pow(coords[b, channel] - point_bn, 2)
if disc:
masks[b, channel] += pow_diff.sum(0) < radius**2
else:
masks[b, channel] += torch.exp(-pow_diff.sum(0) / (2 * radius**2))
return masks
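A minimal usage sketch (editor's illustration, not part of this PR), converting one positive and one negative click into the two-channel gaussian prompt mask:

import torch

from monai.transforms.utils import convert_points_to_disc

image_size = (64, 64, 64)
point = torch.tensor([[[10.0, 20.0, 30.0], [40.0, 40.0, 40.0]]])  # [B=1, N=2, 3]
point_label = torch.tensor([[1, 0]])  # 1/3 = positive, 0/2 = negative, -1 = padding (skipped)
mask = convert_points_to_disc(image_size, point, point_label, radius=2, disc=False)
print(mask.shape)  # torch.Size([1, 2, 64, 64, 64]); channel 0 = negative, channel 1 = positive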


def sample_points_from_label(
labels: Tensor,
label_set: Sequence[int],
max_ppoint: int = 1,
max_npoint: int = 0,
device: torch.device | str | None = "cpu",
use_center: bool = False,
):
"""Sample points from labels.

Args:
labels: [1, 1, H, W, D]
label_set: local index, must match values in labels.
max_ppoint: maximum positive point samples.
max_npoint: maximum negative point samples.
device: returned tensor device.
use_center: whether to sample points closest to the region center instead of randomly.

Returns:
point: point coordinates of [B, N, 3]. B equals the length of label_set.
point_label: [B, N], 0 for negative points, 1 for positive points, -1 for padding.
"""
if not labels.shape[0] == 1:
raise ValueError("labels must have batch size 1.")

if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

labels = labels[0, 0]
unique_labels = labels.unique().cpu().numpy().tolist()
_point = []
_point_label = []
for id in label_set:
if id in unique_labels:
plabels = labels == int(id)
nlabels = ~plabels
_plabels = get_largest_connected_component_mask(erode(plabels.unsqueeze(0).unsqueeze(0))[0, 0])
plabelpoints = torch.nonzero(_plabels).to(device)
if len(plabelpoints) == 0:
plabelpoints = torch.nonzero(plabels).to(device)
nlabelpoints = torch.nonzero(nlabels).to(device)
num_p = min(len(plabelpoints), max_ppoint)
num_n = min(len(nlabelpoints), max_npoint)
pad = max_ppoint + max_npoint - num_p - num_n
if use_center:
pmean = plabelpoints.float().mean(0)
pdis = ((plabelpoints - pmean) ** 2).sum(-1)
_, sorted_indices_tensor = torch.sort(pdis)
sorted_indices = sorted_indices_tensor.cpu().tolist()
else:
sorted_indices = list(range(len(plabelpoints)))
random.shuffle(sorted_indices)
_point.append(
torch.stack(
[plabelpoints[sorted_indices[i]] for i in range(num_p)]
+ random.choices(nlabelpoints, k=num_n)
+ [torch.tensor([0, 0, 0], device=device)] * pad
)
)
_point_label.append(torch.tensor([1] * num_p + [0] * num_n + [-1] * pad).to(device))
else:
# pad the background labels
_point.append(torch.zeros(max_ppoint + max_npoint, 3).to(device))
_point_label.append(torch.zeros(max_ppoint + max_npoint).to(device) - 1)
point = torch.stack(_point)
point_label = torch.stack(_point_label)

return point, point_label
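A minimal usage sketch (editor's illustration, not part of this PR), using a synthetic label volume similar to the new tests; skimage.measure or cuCIM is assumed to be installed for the connected-component step:

import torch

from monai.transforms.utils import sample_points_from_label

labels = torch.zeros(1, 1, 32, 32, 32)
labels[0, 0, 5:10, 5:10, 5:10] = 1
labels[0, 0, 20:25, 20:25, 20:25] = 3
point, point_label = sample_points_from_label(labels, label_set=(1, 3), max_ppoint=1, max_npoint=0)
print(point.shape, point_label.shape)  # torch.Size([2, 1, 3]) torch.Size([2, 1])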


def remove_small_objects(
img: NdarrayTensor,
min_size: int = 64,
monai/transforms/utils_morphological_ops.py
@@ -20,6 +20,8 @@
from monai.config import NdarrayOrTensor
from monai.utils import convert_data_type, convert_to_dst_type, ensure_tuple_rep

__all__ = ["erode", "dilate"]


def erode(mask: NdarrayOrTensor, filter_size: int | Sequence[int] = 3, pad_value: float = 1.0) -> NdarrayOrTensor:
"""
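A minimal usage sketch (editor's illustration, not part of this PR) of the relocated helpers; it assumes `dilate` mirrors `erode`'s `(mask, filter_size, pad_value)` signature and that both accept [N, C, spatial...] masks:

import torch

from monai.transforms.utils_morphological_ops import dilate, erode

mask = torch.zeros(1, 1, 16, 16, 16)
mask[0, 0, 4:12, 4:12, 4:12] = 1  # an 8x8x8 cube of foreground
eroded = erode(mask, filter_size=3)  # shrinks the cube by one voxel per face
dilated = dilate(mask, filter_size=3)  # grows the cube by one voxel per face
print(mask.sum().item(), eroded.sum().item(), dilated.sum().item())  # expected 512.0 216.0 1000.0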
1 change: 1 addition & 0 deletions tests/min_tests.py
@@ -209,6 +209,7 @@ def run_testsuit():
"test_zarr_avg_merger",
"test_perceptual_loss",
"test_ultrasound_confidence_map_transform",
"test_vista3d_utils",
]
assert sorted(exclude_cases) == sorted(set(exclude_cases)), f"Duplicated items in {exclude_cases}"

2 changes: 1 addition & 1 deletion tests/test_morphological_ops.py
@@ -16,7 +16,7 @@
import torch
from parameterized import parameterized

from monai.apps.generation.maisi.utils.morphological_ops import dilate, erode, get_morphological_filter_result_t
from monai.transforms.utils_morphological_ops import dilate, erode, get_morphological_filter_result_t
from tests.utils import TEST_NDARRAYS, assert_allclose

TESTS_SHAPE = []
92 changes: 92 additions & 0 deletions tests/test_vista3d_utils.py
@@ -0,0 +1,92 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import unittest
from unittest.case import skipUnless

import torch
from parameterized import parameterized

from monai.transforms.utils import (
convert_points_to_disc,
get_largest_connected_component_mask_point,
sample_points_from_label,
)
from monai.utils import min_version
from monai.utils.module import optional_import
from tests.utils import skip_if_no_cuda

cp, has_cp = optional_import("cupy")
cucim_skimage, has_cucim = optional_import("cucim.skimage")
measure, has_measure = optional_import("skimage.measure", "0.14.2", min_version)


TESTS_SAMPLE_POINTS_FROM_LABEL = []
for use_center in [True, False]:
labels = torch.zeros(1, 1, 32, 32, 32)
labels[0, 0, 5:10, 5:10, 5:10] = 1
labels[0, 0, 10:15, 10:15, 10:15] = 3
labels[0, 0, 20:25, 20:25, 20:25] = 5
TESTS_SAMPLE_POINTS_FROM_LABEL.append(
[{"labels": labels, "label_set": (1, 3, 5), "use_center": use_center}, (3, 1, 3), (3, 1)]
)

TEST_CONVERT_POINTS_TO_DISC = []
for radius in [1, 2]:
for disc in [True, False]:
image_size = (32, 32, 32)
point = torch.randn(3, 1, 3)
point_label = torch.randint(0, 4, (3, 1))
expected_shape = (point.shape[0], 2, *image_size)
TEST_CONVERT_POINTS_TO_DISC.append(
[
{"image_size": image_size, "point": point, "point_label": point_label, "radius": radius, "disc": disc},
expected_shape,
]
)


@skipUnless(has_measure or has_cucim, "skimage or cucim required")
class TestSamplePointsFromLabel(unittest.TestCase):

@parameterized.expand(TESTS_SAMPLE_POINTS_FROM_LABEL)
def test_shape(self, input_data, expected_point_shape, expected_point_label_shape):
point, point_label = sample_points_from_label(**input_data)
self.assertEqual(point.shape, expected_point_shape)
self.assertEqual(point_label.shape, expected_point_label_shape)


class TestConvertPointsToDisc(unittest.TestCase):

@parameterized.expand(TEST_CONVERT_POINTS_TO_DISC)
def test_shape(self, input_data, expected_shape):
result = convert_points_to_disc(**input_data)
self.assertEqual(result.shape, expected_shape)


@skipUnless(has_cucim and has_cp, "cucim and cupy required")
class TestGetLargestConnectedComponentMaskPoint(unittest.TestCase):

@skip_if_no_cuda
def test_shape(self):
shape = (1, 1, 128, 128, 128)
img_pos = torch.randint(0, 2, shape).cuda()
img_neg = torch.randint(0, 2, shape).cuda()
point_coords = torch.randint(0, 32, (1, 1, 3)).cuda()
point_labels = torch.randint(0, 4, (1, 1)).cuda()
mask = get_largest_connected_component_mask_point(img_pos, img_neg, point_coords, point_labels)
self.assertEqual(mask.shape, shape)


if __name__ == "__main__":
unittest.main()