diff --git a/monai/handlers/utils.py b/monai/handlers/utils.py
index 15d2c59682..60f95d458e 100644
--- a/monai/handlers/utils.py
+++ b/monai/handlers/utils.py
@@ -130,12 +130,12 @@ class mean median max 5percentile 95percentile notnans
             if summary_ops is not None:
                 supported_ops = OrderedDict(
                     {
-                        "mean": lambda x: np.nanmean(x),
-                        "median": lambda x: np.nanmedian(x),
-                        "max": lambda x: np.nanmax(x),
-                        "min": lambda x: np.nanmin(x),
+                        "mean": np.nanmean,
+                        "median": np.nanmedian,
+                        "max": np.nanmax,
+                        "min": np.nanmin,
                         "90percentile": lambda x: np.nanpercentile(x[0], x[1]),
-                        "std": lambda x: np.nanstd(x),
+                        "std": np.nanstd,
                         "notnans": lambda x: (~np.isnan(x)).sum(),
                     }
                 )
@@ -149,7 +149,7 @@ def _compute_op(op: str, d: np.ndarray):
                         return c_op(d)

                     threshold = int(op.split("percentile")[0])
-                    return supported_ops["90percentile"]((d, threshold))
+                    return supported_ops["90percentile"]((d, threshold))  # type: ignore

                 with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
                     f.write(f"class{deli}{deli.join(ops)}\n")
diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py
index 4bf175769b..38aa2199c6 100644
--- a/monai/transforms/compose.py
+++ b/monai/transforms/compose.py
@@ -204,14 +204,13 @@ def __init__(
     def _normalize_probabilities(self, weights):
         if len(weights) == 0:
             return weights
-        else:
-            weights = np.array(weights)
-            if np.any(weights < 0):
-                raise AssertionError("Probabilities must be greater than or equal to zero.")
-            if np.all(weights == 0):
-                raise AssertionError("At least one probability must be greater than zero.")
-            weights = weights / weights.sum()
-            return list(weights)
+        weights = np.array(weights)
+        if np.any(weights < 0):
+            raise AssertionError("Probabilities must be greater than or equal to zero.")
+        if np.all(weights == 0):
+            raise AssertionError("At least one probability must be greater than zero.")
+        weights = weights / weights.sum()
+        return list(weights)

     def flatten(self):
         transforms = []
@@ -232,16 +231,15 @@ def flatten(self):
     def __call__(self, data):
         if len(self.transforms) == 0:
             return data
-        else:
-            index = self.R.multinomial(1, self.weights).argmax()
-            _transform = self.transforms[index]
-            data = apply_transform(_transform, data, self.map_items, self.unpack_items)
-            # if the data is a mapping (dictionary), append the OneOf transform to the end
-            if isinstance(data, Mapping):
-                for key in data.keys():
-                    if key + InverseKeys.KEY_SUFFIX in data:
-                        self.push_transform(data, key, extra_info={"index": index})
-            return data
+        index = self.R.multinomial(1, self.weights).argmax()
+        _transform = self.transforms[index]
+        data = apply_transform(_transform, data, self.map_items, self.unpack_items)
+        # if the data is a mapping (dictionary), append the OneOf transform to the end
+        if isinstance(data, Mapping):
+            for key in data.keys():
+                if key + InverseKeys.KEY_SUFFIX in data:
+                    self.push_transform(data, key, extra_info={"index": index})
+        return data

     def inverse(self, data):
         if len(self.transforms) == 0:
diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py
index 2590bf2e77..222d6ae17c 100644
--- a/monai/transforms/croppad/dictionary.py
+++ b/monai/transforms/croppad/dictionary.py
@@ -663,7 +663,6 @@ def __init__(
             random_size=random_size,
             allow_missing_keys=allow_missing_keys,
         )
-        MapTransform.__init__(self, keys, allow_missing_keys)
         self.roi_scale = roi_scale
         self.max_roi_scale = max_roi_scale
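The de-indented `OneOf._normalize_probabilities` above is behavior-preserving: the early `return` makes the `else` branch redundant. A minimal standalone sketch of the same logic (the free-function name is hypothetical, not part of the diff):

```python
import numpy as np

# Mirrors OneOf._normalize_probabilities: negative weights are rejected,
# all-zero weights are rejected, and valid weights are scaled to sum to 1
# so they can feed self.R.multinomial in __call__.
def normalize_probabilities(weights):
    if len(weights) == 0:
        return weights
    weights = np.array(weights)
    if np.any(weights < 0):
        raise AssertionError("Probabilities must be greater than or equal to zero.")
    if np.all(weights == 0):
        raise AssertionError("At least one probability must be greater than zero.")
    return list(weights / weights.sum())

print(normalize_probabilities([1, 1, 2]))  # [0.25, 0.25, 0.5]
```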
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index a8babfc659..3be205996e 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -1457,7 +1457,7 @@ def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> Union[torch.Tensor,
             raise RuntimeError("Image needs a channel direction.")
         if isinstance(self.loc[0], int) and len(img.shape) == 4 and len(self.loc) == 2:
             raise RuntimeError("Input images of dimension 4 need location tuple to be length 3 or 4")
-        if isinstance(self.loc[0], Sequence) and len(img.shape) == 4 and min(map(lambda x: len(x), self.loc)) == 2:
+        if isinstance(self.loc[0], Sequence) and len(img.shape) == 4 and min(map(len, self.loc)) == 2:
             raise RuntimeError("Input images of dimension 4 need location tuple to be length 3 or 4")

         n_dims = len(img.shape[1:])
diff --git a/monai/transforms/post/array.py b/monai/transforms/post/array.py
index a7280cfe98..d20f368109 100644
--- a/monai/transforms/post/array.py
+++ b/monai/transforms/post/array.py
@@ -205,7 +205,7 @@ def __call__(
         rounding = self.rounding if rounding is None else rounding
         if rounding is not None:
-            rounding = look_up_option(rounding, ["torchrounding"])
+            look_up_option(rounding, ["torchrounding"])
             img = torch.round(img)

         return img.float()
diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py
index 01bd0cec6d..cb4e69a509 100644
--- a/monai/transforms/spatial/array.py
+++ b/monai/transforms/spatial/array.py
@@ -335,8 +335,7 @@ def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
         """
         if isinstance(img, np.ndarray):
             return np.ascontiguousarray(np.flip(img, map_spatial_axes(img.ndim, self.spatial_axis)))
-        else:
-            return torch.flip(img, map_spatial_axes(img.ndim, self.spatial_axis))
+        return torch.flip(img, map_spatial_axes(img.ndim, self.spatial_axis))


 class Resize(Transform):
diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index 57d43db5d0..3df53f8850 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -391,9 +391,8 @@ def __call__(self, data: NdarrayOrTensor):
         if self.data_type == "tensor":
             dtype_ = get_equivalent_dtype(self.dtype, torch.Tensor)
             return convert_to_tensor(data, dtype=dtype_, device=self.device)
-        else:
-            dtype_ = get_equivalent_dtype(self.dtype, np.ndarray)
-            return convert_to_numpy(data, dtype=dtype_)
+        dtype_ = get_equivalent_dtype(self.dtype, np.ndarray)
+        return convert_to_numpy(data, dtype=dtype_)


 class ToNumpy(Transform):
@@ -1091,11 +1090,11 @@ def __call__(
             img_ = img[mask]

         supported_ops = {
-            "mean": lambda x: np.nanmean(x),
-            "median": lambda x: np.nanmedian(x),
-            "max": lambda x: np.nanmax(x),
-            "min": lambda x: np.nanmin(x),
-            "std": lambda x: np.nanstd(x),
+            "mean": np.nanmean,
+            "median": np.nanmedian,
+            "max": np.nanmax,
+            "min": np.nanmin,
+            "std": np.nanstd,
         }

         def _compute(op: Callable, data: np.ndarray):
@@ -1107,7 +1106,7 @@ def _compute(op: Callable, data: np.ndarray):
         for o in self.ops:
             if isinstance(o, str):
                 o = look_up_option(o, supported_ops.keys())
-                meta_data[self.key_prefix + "_" + o] = _compute(supported_ops[o], img_)
+                meta_data[self.key_prefix + "_" + o] = _compute(supported_ops[o], img_)  # type: ignore
             elif callable(o):
                 meta_data[self.key_prefix + "_custom_" + str(custom_index)] = _compute(o, img_)
                 custom_index += 1
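A quick sanity check (standalone, not part of the diff) that dropping the `lambda x: ...` wrappers from the `supported_ops` dicts is behavior-preserving: for a single positional argument, the bare NumPy reduction and its lambda wrapper are interchangeable.

```python
import numpy as np

# The removed wrappers only forwarded one argument, so the functions
# themselves can serve as dict values directly.
data = np.array([1.0, np.nan, 3.0])
assert np.nanmean(data) == (lambda x: np.nanmean(x))(data) == 2.0
assert np.nanstd(data) == (lambda x: np.nanstd(x))(data) == 1.0
```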
transforms. """ -import copy import logging import re from copy import deepcopy @@ -886,7 +885,7 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, N if isinstance(val, torch.Tensor): d[new_key] = val.detach().clone() else: - d[new_key] = copy.deepcopy(val) + d[new_key] = deepcopy(val) return d diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index a627a7544a..fbd4a6b12b 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -20,12 +20,11 @@ import torch import monai -import monai.transforms.transform from monai.config import DtypeLike, IndexSelection from monai.config.type_definitions import NdarrayOrTensor from monai.networks.layers import GaussianFilter from monai.transforms.compose import Compose, OneOf -from monai.transforms.transform import MapTransform, Transform +from monai.transforms.transform import MapTransform, Transform, apply_transform from monai.transforms.utils_pytorch_numpy_unification import any_np_pt, nonzero, ravel, unravel_index from monai.utils import ( GridSampleMode, @@ -1330,9 +1329,7 @@ def _get_data(obj, key): prev_data = _get_data(test_data, key) prev_type = type(prev_data) prev_device = prev_data.device if isinstance(prev_data, torch.Tensor) else None - test_data = monai.transforms.transform.apply_transform( - _transform, test_data, transform.map_items, transform.unpack_items - ) + test_data = apply_transform(_transform, test_data, transform.map_items, transform.unpack_items) # every time the type or device changes, increment the counter curr_data = _get_data(test_data, key) curr_device = curr_data.device if isinstance(curr_data, torch.Tensor) else None diff --git a/monai/transforms/utils_pytorch_numpy_unification.py b/monai/transforms/utils_pytorch_numpy_unification.py index 7b65f690c0..4283c4a81f 100644 --- a/monai/transforms/utils_pytorch_numpy_unification.py +++ b/monai/transforms/utils_pytorch_numpy_unification.py @@ -159,8 +159,7 @@ def floor_divide(a: NdarrayOrTensor, b) -> NdarrayOrTensor: if is_module_ver_at_least(torch, (1, 8, 0)): return torch.div(a, b, rounding_mode="floor") return torch.floor_divide(a, b) - else: - return np.floor_divide(a, b) + return np.floor_divide(a, b) def unravel_index(idx, shape): diff --git a/tests/test_intensity_stats.py b/tests/test_intensity_stats.py index 059271e442..92a2c04585 100644 --- a/tests/test_intensity_stats.py +++ b/tests/test_intensity_stats.py @@ -31,7 +31,7 @@ ] TEST_CASE_3 = [ - {"ops": [lambda x: np.mean(x), "max", lambda x: np.min(x)], "key_prefix": "orig"}, + {"ops": [np.mean, "max", np.min], "key_prefix": "orig"}, np.array([[[0.0, 1.0], [2.0, 3.0]]]), None, {"orig_custom_0": 1.5, "orig_max": 3.0, "orig_custom_1": 0.0}, diff --git a/tests/test_intensity_statsd.py b/tests/test_intensity_statsd.py index 8c8bc8795a..596c80deb5 100644 --- a/tests/test_intensity_statsd.py +++ b/tests/test_intensity_statsd.py @@ -34,7 +34,7 @@ ] TEST_CASE_3 = [ - {"keys": "img", "ops": [lambda x: np.mean(x), "max", lambda x: np.min(x)], "key_prefix": "orig"}, + {"keys": "img", "ops": [np.mean, "max", np.min], "key_prefix": "orig"}, {"img": np.array([[[0.0, 1.0], [2.0, 3.0]]])}, "img_meta_dict", {"orig_custom_0": 1.5, "orig_max": 3.0, "orig_custom_1": 0.0},