From 99f4c5dbf6a5f4aae31cc4371d63a28f029f1d9d Mon Sep 17 00:00:00 2001
From: Alexandra Marquardt
Date: Sun, 5 Feb 2023 12:35:11 +0100
Subject: [PATCH 01/68] initial commit, reformatted original files

---
 monai/apps/auto3dseg/auto_runner.py | 2 --
 monai/apps/auto3dseg/bundle_gen.py | 3 ---
 monai/apps/auto3dseg/data_analyzer.py | 2 --
 monai/apps/auto3dseg/ensemble_builder.py | 2 --
 monai/apps/auto3dseg/hpo_gen.py | 2 --
 monai/apps/deepedit/interaction.py | 1 -
 monai/apps/deepedit/transforms.py | 1 -
 monai/apps/deepgrow/interaction.py | 1 -
 monai/apps/detection/utils/anchor_utils.py | 1 -
 monai/apps/nuclick/transforms.py | 1 -
 monai/apps/pathology/metrics/lesion_froc.py | 1 -
 monai/apps/pathology/transforms/post/array.py | 1 -
 monai/apps/pathology/transforms/spatial/array.py | 2 --
 monai/apps/pathology/transforms/spatial/dictionary.py | 1 -
 monai/apps/reconstruction/transforms/dictionary.py | 1 -
 monai/auto3dseg/analyzer.py | 4 ----
 monai/auto3dseg/seg_summarizer.py | 1 -
 monai/config/deviceconfig.py | 1 -
 monai/data/image_reader.py | 2 +-
 monai/data/thread_buffer.py | 1 -
 monai/data/utils.py | 1 -
 monai/engines/evaluator.py | 1 -
 monai/losses/ds_loss.py | 1 -
 monai/losses/ssim_loss.py | 1 -
 monai/metrics/regression.py | 1 -
 monai/networks/blocks/crf.py | 1 -
 monai/networks/blocks/denseblock.py | 1 -
 monai/networks/blocks/dynunet_block.py | 1 -
 monai/networks/blocks/segresnet_block.py | 2 --
 monai/networks/blocks/squeeze_and_excitation.py | 3 ---
 monai/networks/layers/filtering.py | 1 -
 monai/networks/layers/simplelayers.py | 3 ---
 monai/networks/nets/ahnet.py | 8 ++------
 monai/networks/nets/autoencoder.py | 1 -
 monai/networks/nets/densenet.py | 1 -
 monai/networks/nets/dints.py | 1 -
 monai/networks/nets/flexible_unet.py | 2 --
 monai/networks/nets/highresnet.py | 3 +--
 monai/networks/nets/hovernet.py | 8 --------
 monai/networks/nets/milmodel.py | 9 ---------
 monai/networks/nets/resnet.py | 2 --
 monai/networks/nets/segresnet_ds.py | 5 -----
 monai/networks/nets/senet.py | 2 --
 monai/networks/nets/swin_unetr.py | 2 --
 monai/networks/nets/transchex.py | 2 --
 monai/networks/nets/unet.py | 2 --
 monai/networks/nets/varautoencoder.py | 1 -
 monai/transforms/adaptors.py | 2 --
 monai/transforms/intensity/array.py | 7 -------
 monai/transforms/intensity/dictionary.py | 4 ----
 monai/transforms/spatial/array.py | 3 ---
 monai/transforms/spatial/dictionary.py | 4 ++--
 monai/transforms/utility/array.py | 2 --
 monai/transforms/utility/dictionary.py | 1 -
 monai/transforms/utils_create_transform_ims.py | 1 -
 monai/utils/profiling.py | 3 ---
 monai/visualize/utils.py | 1 -
 monai/visualize/visualizer.py | 1 -
 tests/min_tests.py | 1 -
 tests/runner.py | 1 -
 tests/test_anchor_box.py | 1 -
 tests/test_auto3dseg.py | 2 --
 tests/test_auto3dseg_hpo.py | 1 -
 tests/test_bilateral_approx_cpu.py | 2 --
 tests/test_bilateral_approx_cuda.py | 2 --
 tests/test_bilateral_precise.py | 4 ----
 tests/test_cast_to_type.py | 1 -
 tests/test_compute_generalized_dice.py | 2 --
 tests/test_compute_meandice.py | 2 --
 tests/test_compute_meaniou.py | 2 --
 tests/test_compute_regression_metrics.py | 4 ----
 tests/test_crf_cpu.py | 1 -
 tests/test_crf_cuda.py | 1 -
 tests/test_crop_foregroundd.py | 1 -
 tests/test_cumulative_average.py | 1 -
 tests/test_cumulative_average_dist.py | 1 -
 tests/test_dataset_summary.py | 2 --
 tests/test_efficientnet.py | 1 -
 tests/test_ensure_tuple.py | 1 -
 tests/test_fg_bg_to_indicesd.py | 1 -
 tests/test_flexible_unet.py | 2 --
 tests/test_generate_instance_contour.py | 1 -
 tests/test_gmm.py | 1 -
 tests/test_handler_classification_saver.py | 1 -
 tests/test_handler_logfile.py | 1 -
 tests/test_handler_mlflow.py | 1 -
 tests/test_handler_prob_map_producer.py | 1 -
 tests/test_handler_regression_metrics.py | 1 -
 tests/test_handler_tb_image.py | 1 -
 tests/test_handler_tb_stats.py | 2 --
 tests/test_hilbert_transform.py | 1 -
 tests/test_integration_classification_2d.py | 1 -
 tests/test_inverse.py | 3 ---
 tests/test_inverse_collation.py | 1 -
 tests/test_k_space_spike_noise.py | 2 --
 tests/test_k_space_spike_noised.py | 2 --
 tests/test_lr_finder.py | 1 -
 tests/test_masked_inference_wsi_dataset.py | 1 -
 tests/test_mlp.py | 1 -
 tests/test_network_consistency.py | 1 -
 tests/test_nifti_endianness.py | 1 -
 tests/test_pad_collation.py | 1 -
 tests/test_phl_cpu.py | 1 -
 tests/test_phl_cuda.py | 1 -
 tests/test_rand_k_space_spike_noised.py | 1 -
 tests/test_segresnet_ds.py | 2 --
 tests/test_selfattention.py | 1 -
 tests/test_squeezedim.py | 2 --
 tests/test_state_cacher.py | 1 -
 tests/test_tile_on_grid.py | 3 ---
 tests/test_tile_on_grid_dict.py | 3 ---
 tests/test_trainable_bilateral.py | 4 ----
 tests/test_trainable_joint_bilateral.py | 4 ----
 tests/test_transformerblock.py | 1 -
 tests/testing_data/integration_answers.py | 2 +-
 tests/utils.py | 1 -
 116 files changed, 7 insertions(+), 209 deletions(-)

diff --git a/monai/apps/auto3dseg/auto_runner.py b/monai/apps/auto3dseg/auto_runner.py
index 2d83b0690f..60b58fca27 100644
--- a/monai/apps/auto3dseg/auto_runner.py
+++ b/monai/apps/auto3dseg/auto_runner.py
@@ -223,7 +223,6 @@ def __init__(
         templates_path_or_url: str | None = None,
         **kwargs,
     ):
-
         logger.info(f"AutoRunner using work directory {work_dir}")
         os.makedirs(work_dir, exist_ok=True)

@@ -641,7 +640,6 @@ def run(self):

         # step 2: algorithm generation
         if self.algo_gen:
-
             if not os.path.isfile(self.datastats_filename):
                 raise ValueError(
                     f"Could not find the datastats file {self.datastats_filename}. "
diff --git a/monai/apps/auto3dseg/bundle_gen.py b/monai/apps/auto3dseg/bundle_gen.py
index 397244a73a..54af2b99d2 100644
--- a/monai/apps/auto3dseg/bundle_gen.py
+++ b/monai/apps/auto3dseg/bundle_gen.py
@@ -377,9 +377,7 @@ def __init__(
         data_stats_filename: str | None = None,
         data_src_cfg_name: str | None = None,
     ):
-
         if algos is None or isinstance(algos, (list, tuple, str)):
-
             if templates_path_or_url is None:
                 templates_path_or_url = default_algo_zip

@@ -404,7 +402,6 @@ def __init__(
         self.algos: Any = []
         if isinstance(algos, dict):
             for algo_name, algo_params in algos.items():
-
                 template_path = os.path.dirname(algo_params.get("template_path", "."))
                 if len(template_path) > 0 and template_path not in sys.path:
                     sys.path.append(template_path)
diff --git a/monai/apps/auto3dseg/data_analyzer.py b/monai/apps/auto3dseg/data_analyzer.py
index ffa52f6646..9176ede626 100644
--- a/monai/apps/auto3dseg/data_analyzer.py
+++ b/monai/apps/auto3dseg/data_analyzer.py
@@ -214,7 +214,6 @@ def get_all_case_stats(self, key="training", transform_list=None):
             Orientationd(keys=keys, axcodes="RAS"),
         ]
         if self.label_key is not None:
-
             allowed_shape_difference = self.extra_params.pop("allowed_shape_difference", 5)
             transform_list.append(
                 EnsureSameShaped(
@@ -235,7 +234,6 @@ def get_all_case_stats(self, key="training", transform_list=None):
             warnings.warn("tqdm is not installed. not displaying the caching progress.")

         for batch_data in tqdm(dataloader) if has_tqdm else dataloader:
-
             batch_data = batch_data[0]
             batch_data[self.image_key] = batch_data[self.image_key].to(self.device)
diff --git a/monai/apps/auto3dseg/ensemble_builder.py b/monai/apps/auto3dseg/ensemble_builder.py
index 224b412145..548911d0c7 100644
--- a/monai/apps/auto3dseg/ensemble_builder.py
+++ b/monai/apps/auto3dseg/ensemble_builder.py
@@ -180,7 +180,6 @@ class AlgoEnsembleBestN(AlgoEnsemble):
     """

     def __init__(self, n_best: int = 5):
-
         super().__init__()
         self.n_best = n_best

@@ -225,7 +224,6 @@ class AlgoEnsembleBestByFold(AlgoEnsemble):
     """

     def __init__(self, n_fold: int = 5):
-
         super().__init__()
         self.n_fold = n_fold

diff --git a/monai/apps/auto3dseg/hpo_gen.py b/monai/apps/auto3dseg/hpo_gen.py
index 922c93790f..ec18d897b8 100644
--- a/monai/apps/auto3dseg/hpo_gen.py
+++ b/monai/apps/auto3dseg/hpo_gen.py
@@ -197,7 +197,6 @@ def generate(self, output_folder: str = ".") -> None:
         if isinstance(self.algo, BundleAlgo):
             self.algo.export_to_disk(output_folder, task_prefix + task_id, fill_with_datastats=False)
         else:
-
             ConfigParser.export_config_file(self.params, write_path)
             logger.info(write_path)

@@ -376,7 +375,6 @@ def generate(self, output_folder: str = ".") -> None:
         if isinstance(self.algo, BundleAlgo):
             self.algo.export_to_disk(output_folder, task_prefix + task_id, fill_with_datastats=False)
         else:
-
             ConfigParser.export_config_file(self.params, write_path)
             logger.info(write_path)

diff --git a/monai/apps/deepedit/interaction.py b/monai/apps/deepedit/interaction.py
index 05623cf248..4c5bd3ec2b 100644
--- a/monai/apps/deepedit/interaction.py
+++ b/monai/apps/deepedit/interaction.py
@@ -51,7 +51,6 @@ def __init__(
         click_probability_key: str = "probability",
         max_interactions: int = 1,
     ) -> None:
-
         self.deepgrow_probability = deepgrow_probability
         self.transforms = Compose(transforms) if not isinstance(transforms, Compose) else transforms
         self.train = train
diff --git a/monai/apps/deepedit/transforms.py b/monai/apps/deepedit/transforms.py
index 89790b2624..0e2a0d037d 100644
--- a/monai/apps/deepedit/transforms.py
+++ b/monai/apps/deepedit/transforms.py
@@ -529,7 +529,6 @@ def find_guidance(self, discrepancy):
         return None

     def add_guidance(self, guidance, discrepancy, label_names, labels):
-
         # Positive clicks of the segment in the iteration
         pos_discr = discrepancy[0]  # idx 0 is positive discrepancy and idx 1 is negative discrepancy

diff --git a/monai/apps/deepgrow/interaction.py b/monai/apps/deepgrow/interaction.py
index c134d45d22..41ebd2fbe7 100644
--- a/monai/apps/deepgrow/interaction.py
+++ b/monai/apps/deepgrow/interaction.py
@@ -46,7 +46,6 @@ def __init__(
         train: bool,
         key_probability: str = "probability",
     ) -> None:
-
         if not isinstance(transforms, Compose):
             transforms = Compose(transforms)

diff --git a/monai/apps/detection/utils/anchor_utils.py b/monai/apps/detection/utils/anchor_utils.py
index 379c972f6d..12bf90585d 100644
--- a/monai/apps/detection/utils/anchor_utils.py
+++ b/monai/apps/detection/utils/anchor_utils.py
@@ -373,7 +373,6 @@ def __init__(
         | Sequence[Sequence[float]] = ((32, 32, 32), (48, 20, 20), (20, 48, 20), (20, 20, 48)),
         indexing: str = "ij",
     ) -> None:
-
         nn.Module.__init__(self)

         spatial_dims = len(base_anchor_shapes[0])
diff --git a/monai/apps/nuclick/transforms.py b/monai/apps/nuclick/transforms.py
index a9fbaa3144..7ec416dabb 100644
--- a/monai/apps/nuclick/transforms.py
+++ b/monai/apps/nuclick/transforms.py
@@ -151,7 +151,6 @@ def __init__(
         others_value: int = 0,
         to_binary_mask: bool = True,
     ):
-
         super().__init__(keys, allow_missing_keys=False)
         self.others = others
         self.mask_value = mask_value
diff --git a/monai/apps/pathology/metrics/lesion_froc.py b/monai/apps/pathology/metrics/lesion_froc.py
index 67e8a8510a..d216fa1d2e 100644
--- a/monai/apps/pathology/metrics/lesion_froc.py
+++ b/monai/apps/pathology/metrics/lesion_froc.py
@@ -74,7 +74,6 @@ def __init__(
         nms_box_size: int = 48,
         image_reader_name: str = "cuCIM",
     ) -> None:
-
         self.data = data
         self.grow_distance = grow_distance
         self.itc_diameter = itc_diameter
diff --git a/monai/apps/pathology/transforms/post/array.py b/monai/apps/pathology/transforms/post/array.py
index ba005706a5..5289dc101c 100644
--- a/monai/apps/pathology/transforms/post/array.py
+++ b/monai/apps/pathology/transforms/post/array.py
@@ -252,7 +252,6 @@ class GenerateDistanceMap(Transform):
     backend = [TransformBackends.NUMPY]

     def __init__(self, smooth_fn: Callable | None = None, dtype: DtypeLike = np.float32) -> None:
-
         self.smooth_fn = smooth_fn if smooth_fn is not None else GaussianSmooth()
         self.dtype = dtype

diff --git a/monai/apps/pathology/transforms/spatial/array.py b/monai/apps/pathology/transforms/spatial/array.py
index 9733156c85..ea8a8c89a9 100644
--- a/monai/apps/pathology/transforms/spatial/array.py
+++ b/monai/apps/pathology/transforms/spatial/array.py
@@ -165,7 +165,6 @@ def __init__(
            raise ValueError("Unsupported filter_mode, must be [min, max or random]: " + str(self.filter_mode))

     def randomize(self, img_size: Sequence[int]) -> None:
-
         c, h, w = img_size

         self.offset = (0, 0)
@@ -239,7 +238,6 @@ def __call__(self, image: NdarrayOrTensor) -> NdarrayOrTensor:

         else:
             if len(img_np) > self.tile_count:
-
                 if self.filter_mode == "min":
                     # default, keep non-background tiles (smallest values)
                     idxs = np.argsort(img_np.sum(axis=(1, 2, 3)))[: self.tile_count]
diff --git a/monai/apps/pathology/transforms/spatial/dictionary.py b/monai/apps/pathology/transforms/spatial/dictionary.py
index a304ea6d62..8166a5891d 100644
--- a/monai/apps/pathology/transforms/spatial/dictionary.py
+++ b/monai/apps/pathology/transforms/spatial/dictionary.py
@@ -121,7 +121,6 @@ def randomize(self, data: Any = None) -> None:
     def __call__(
         self, data: Mapping[Hashable, NdarrayOrTensor]
     ) -> dict[Hashable, NdarrayOrTensor] | list[dict[Hashable, NdarrayOrTensor]]:
-
         self.randomize()

         d = dict(data)
diff --git a/monai/apps/reconstruction/transforms/dictionary.py b/monai/apps/reconstruction/transforms/dictionary.py
index f475b9870d..11454b0b6b 100644
--- a/monai/apps/reconstruction/transforms/dictionary.py
+++ b/monai/apps/reconstruction/transforms/dictionary.py
@@ -213,7 +213,6 @@ class ReferenceBasedSpatialCropd(Cropd):
     """

     def __init__(self, keys: KeysCollection, ref_key: str, allow_missing_keys: bool = False) -> None:
-
         super().__init__(keys, cropper=None, allow_missing_keys=allow_missing_keys)  # type: ignore
         self.ref_key = ref_key

diff --git a/monai/auto3dseg/analyzer.py b/monai/auto3dseg/analyzer.py
index af24b5c907..a0eeb0a9ca 100644
--- a/monai/auto3dseg/analyzer.py
+++ b/monai/auto3dseg/analyzer.py
@@ -199,7 +199,6 @@ class ImageStats(Analyzer):
     """

     def __init__(self, image_key: str, stats_name: str = "image_stats") -> None:
-
         if not isinstance(image_key, str):
             raise ValueError("image_key input must be str")

@@ -292,7 +291,6 @@ class FgImageStats(Analyzer):
     """

     def __init__(self, image_key: str, label_key: str, stats_name: str = "image_foreground_stats"):
-
         self.image_key = image_key
         self.label_key = label_key

@@ -375,7 +373,6 @@ class LabelStats(Analyzer):
     """

     def __init__(self, image_key: str, label_key: str, stats_name: str = "label_stats", do_ccp: bool | None = True):
-
         self.image_key = image_key
         self.label_key = label_key
         self.do_ccp = do_ccp
@@ -857,7 +854,6 @@ def __init__(
         hist_bins: list[int] | int | None = None,
         hist_range: list | None = None,
     ):
-
         self.image_key = image_key

         # set defaults
diff --git a/monai/auto3dseg/seg_summarizer.py b/monai/auto3dseg/seg_summarizer.py
index 22db58f76e..deeafa212e 100644
--- a/monai/auto3dseg/seg_summarizer.py
+++ b/monai/auto3dseg/seg_summarizer.py
@@ -89,7 +89,6 @@ def __init__(
         hist_range: list | None = None,
         histogram_only: bool = False,
     ) -> None:
-
         self.image_key = image_key
         self.label_key = label_key
         # set defaults
diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py
index 16620b53c7..7567c9c7b0 100644
--- a/monai/config/deviceconfig.py
+++ b/monai/config/deviceconfig.py
@@ -191,7 +191,6 @@ def print_system_info(file=sys.stdout) -> None:


 def get_gpu_info() -> OrderedDict:
-
     output: OrderedDict = OrderedDict()

     num_gpus = torch.cuda.device_count()
diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py
index c1cfcfd8ca..d03028d198 100644
--- a/monai/data/image_reader.py
+++ b/monai/data/image_reader.py
@@ -616,7 +616,7 @@ def get_data(self, data) -> tuple[np.ndarray, dict]:
         img_array: list[np.ndarray] = []
         compatible_meta: dict = {}

-        for (data_array, metadata) in ensure_tuple(dicom_data):
+        for data_array, metadata in ensure_tuple(dicom_data):
             img_array.append(np.ascontiguousarray(np.swapaxes(data_array, 0, 1) if self.swap_ij else data_array))
             affine = self._get_affine(metadata, self.affine_lps_to_ras)
             metadata[MetaKeys.SPACE] = SpaceKeys.RAS if self.affine_lps_to_ras else SpaceKeys.LPS
diff --git a/monai/data/thread_buffer.py b/monai/data/thread_buffer.py
index e86a4043a2..fc7826fb15 100644
--- a/monai/data/thread_buffer.py
+++ b/monai/data/thread_buffer.py
@@ -66,7 +66,6 @@ def stop(self):
         self.gen_thread = None

     def __iter__(self):
-
         self.is_running = True
         self.gen_thread = Thread(target=self.enqueue_values, daemon=True)
         self.gen_thread.start()
diff --git a/monai/data/utils.py b/monai/data/utils.py
index 96e3e15d95..5d6869334b 100644
--- a/monai/data/utils.py
+++ b/monai/data/utils.py
@@ -1073,7 +1073,6 @@ def compute_importance_map(
     if mode == BlendMode.CONSTANT:
         importance_map = torch.ones(patch_size, device=device, dtype=torch.float)
     elif mode == BlendMode.GAUSSIAN:
-
         sigma_scale = ensure_tuple_rep(sigma_scale, len(patch_size))
         sigmas = [i * sigma_s for i, sigma_s in zip(patch_size, sigma_scale)]

diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py
index 468ffdb19a..0829c07981 100644
--- a/monai/engines/evaluator.py
+++ b/monai/engines/evaluator.py
@@ -298,7 +298,6 @@ def _iteration(self, engine: SupervisedEvaluator, batchdata: dict[str, torch.Ten

         # execute forward computation
         with engine.mode(engine.network):
-
             if engine.amp:
                 with torch.cuda.amp.autocast(**engine.amp_kwargs):
                     engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs)
diff --git a/monai/losses/ds_loss.py b/monai/losses/ds_loss.py
index f6b6bcc14b..da460178ca 100644
--- a/monai/losses/ds_loss.py
+++ b/monai/losses/ds_loss.py
@@ -71,7 +71,6 @@ def get_loss(self, input: torch.Tensor, target: torch.Tensor):
         return self.loss(input, target)

     def forward(self, input: torch.Tensor | list[torch.Tensor], target: torch.Tensor):
-
         if isinstance(input, (list, tuple)):
             weights = self.get_weights(levels=len(input))
             loss = torch.tensor(0, dtype=torch.float, device=target.device)
diff --git a/monai/losses/ssim_loss.py b/monai/losses/ssim_loss.py
index 0bb6163770..e8e5d0c2ba 100644
--- a/monai/losses/ssim_loss.py
+++ b/monai/losses/ssim_loss.py
@@ -87,7 +87,6 @@ def forward(self, x: torch.Tensor, y: torch.Tensor, data_range: torch.Tensor) ->
                 data_range, self.win_size, self.k1, self.k2, self.spatial_dims
             )._compute_tensor(x, y)
         elif x.shape[0] > 1:
-
             for i in range(x.shape[0]):
                 ssim_val: torch.Tensor = SSIMMetric(
                     data_range, self.win_size, self.k1, self.k2, self.spatial_dims
diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py
index ecab61308b..405d9ae716 100644
--- a/monai/metrics/regression.py
+++ b/monai/metrics/regression.py
@@ -278,7 +278,6 @@ def __init__(
         self.w = torch.ones([1, 1] + [win_size for _ in range(spatial_dims)]) / win_size**spatial_dims

     def _compute_intermediate_statistics(self, x: torch.Tensor, y: torch.Tensor) -> tuple[torch.Tensor, ...]:
-
         data_range = self.data_range[(None,) * (self.spatial_dims + 2)]
         # determine whether to work with 2D convolution or 3D
         conv = getattr(F, f"conv{self.spatial_dims}d")
diff --git a/monai/networks/blocks/crf.py b/monai/networks/blocks/crf.py
index 5bbad4dff4..398b89882a 100644
--- a/monai/networks/blocks/crf.py
+++ b/monai/networks/blocks/crf.py
@@ -92,7 +92,6 @@ def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor):

         # mean field loop
         for _ in range(self.iterations):
-
             # message passing step for both kernels
             bilateral_output = PHLFilter.apply(output_tensor, bilateral_features)
             gaussian_output = PHLFilter.apply(output_tensor, gaussian_features)
diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py
index dfb39adb34..afd3183581 100644
--- a/monai/networks/blocks/denseblock.py
+++ b/monai/networks/blocks/denseblock.py
@@ -77,7 +77,6 @@ def __init__(
         dropout: int | None = None,
         bias: bool = True,
     ):
-
         self.spatial_dims = spatial_dims
         self.kernel_size = kernel_size
         self.num_res_units = num_res_units
diff --git a/monai/networks/blocks/dynunet_block.py b/monai/networks/blocks/dynunet_block.py
index 6b689cd0ab..12afab3464 100644
--- a/monai/networks/blocks/dynunet_block.py
+++ b/monai/networks/blocks/dynunet_block.py
@@ -301,7 +301,6 @@ def get_conv_layer(


 def get_padding(kernel_size: Sequence[int] | int, stride: Sequence[int] | int) -> tuple[int, ...] | int:
-
     kernel_size_np = np.atleast_1d(kernel_size)
     stride_np = np.atleast_1d(stride)
     padding_np = (kernel_size_np - stride_np + 1) / 2
diff --git a/monai/networks/blocks/segresnet_block.py b/monai/networks/blocks/segresnet_block.py
index 01fc907ab7..3337f50043 100644
--- a/monai/networks/blocks/segresnet_block.py
+++ b/monai/networks/blocks/segresnet_block.py
@@ -22,7 +22,6 @@ def get_conv_layer(
     spatial_dims: int, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, bias: bool = False
 ):
-
     return Convolution(
         spatial_dims, in_channels, out_channels, strides=stride, kernel_size=kernel_size, bias=bias, conv_only=True
     )

@@ -78,7 +77,6 @@ def __init__(
         self.conv2 = get_conv_layer(spatial_dims, in_channels=in_channels, out_channels=in_channels)

     def forward(self, x):
-
         identity = x

         x = self.norm1(x)
diff --git a/monai/networks/blocks/squeeze_and_excitation.py b/monai/networks/blocks/squeeze_and_excitation.py
index 0928cdb641..665e9020ff 100644
--- a/monai/networks/blocks/squeeze_and_excitation.py
+++ b/monai/networks/blocks/squeeze_and_excitation.py
@@ -247,7 +247,6 @@ def __init__(
         stride: int = 1,
         downsample: Convolution | None = None,
     ) -> None:
-
         conv_param_1 = {
             "strides": 1,
             "kernel_size": 1,
@@ -298,7 +297,6 @@ def __init__(
         stride: int = 1,
         downsample: Convolution | None = None,
     ) -> None:
-
         conv_param_1 = {
             "strides": stride,
             "kernel_size": 1,
@@ -348,7 +346,6 @@ def __init__(
         downsample: Convolution | None = None,
         base_width: int = 4,
     ) -> None:
-
         conv_param_1 = {
             "strides": 1,
             "kernel_size": 1,
diff --git a/monai/networks/layers/filtering.py b/monai/networks/layers/filtering.py
index 4ed159c609..b8bfe9a3bc 100644
--- a/monai/networks/layers/filtering.py
+++ b/monai/networks/layers/filtering.py
@@ -85,7 +85,6 @@ class PHLFilter(torch.autograd.Function):

     @staticmethod
     def forward(ctx, input, features, sigmas=None):
-
         scaled_features = features
         if sigmas is not None:
             for i in range(features.size(1)):
diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py
index c5398b533e..a1122ceaa2 100644
--- a/monai/networks/layers/simplelayers.py
+++ b/monai/networks/layers/simplelayers.py
@@ -321,7 +321,6 @@ class SavitzkyGolayFilter(nn.Module):
     """

     def __init__(self, window_length: int, order: int, axis: int = 2, mode: str = "zeros"):
-
         super().__init__()
         if order >= window_length:
             raise ValueError("order must be less than window_length.")
@@ -365,7 +364,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:

     @staticmethod
     def _make_coeffs(window_length, order):
-
         half_length, rem = divmod(window_length, 2)
         if rem == 0:
             raise ValueError("window_length must be odd.")
@@ -391,7 +389,6 @@ class HilbertTransform(nn.Module):
     """

     def __init__(self, axis: int = 2, n: int | None = None) -> None:
-
         super().__init__()
         self.axis = axis
         self.n = n
diff --git a/monai/networks/nets/ahnet.py b/monai/networks/nets/ahnet.py
index 0080180d75..ae248c0cd1 100644
--- a/monai/networks/nets/ahnet.py
+++ b/monai/networks/nets/ahnet.py
@@ -25,7 +25,6 @@


 class Bottleneck3x3x1(nn.Module):
-
     expansion = 4

     def __init__(
@@ -36,7 +35,6 @@ def __init__(
         stride: Sequence[int] | int = 1,
         downsample: nn.Sequential | None = None,
     ) -> None:
-
         super().__init__()

         conv_type = Conv[Conv.CONV, spatial_dims]
@@ -275,13 +273,11 @@ def __init__(self, spatial_dims: int, psp_block_num: int, in_ch: int, upsample_m
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         outputs = []
         if self.upsample_mode == "transpose":
-            for (project_module, pool_module, up_module) in zip(
-                self.project_modules, self.pool_modules, self.up_modules
-            ):
+            for project_module, pool_module, up_module in zip(self.project_modules, self.pool_modules, self.up_modules):
                 output = up_module(project_module(pool_module(x)))
                 outputs.append(output)
         else:
-            for (project_module, pool_module) in zip(self.project_modules, self.pool_modules):
+            for project_module, pool_module in zip(self.project_modules, self.pool_modules):
                 interpolate_size = x.shape[2:]
                 align_corners: bool | None = None
                 if self.upsample_mode in ["trilinear", "bilinear"]:
diff --git a/monai/networks/nets/autoencoder.py b/monai/networks/nets/autoencoder.py
index 19906f3ba8..8f093bcc1d 100644
--- a/monai/networks/nets/autoencoder.py
+++ b/monai/networks/nets/autoencoder.py
@@ -105,7 +105,6 @@ def __init__(
         dropout: tuple | str | float | None = None,
         bias: bool = True,
     ) -> None:
-
         super().__init__()
         self.dimensions = spatial_dims
         self.in_channels = in_channels
diff --git a/monai/networks/nets/densenet.py b/monai/networks/nets/densenet.py
index d822330347..2100272d91 100644
--- a/monai/networks/nets/densenet.py
+++ b/monai/networks/nets/densenet.py
@@ -181,7 +181,6 @@ def __init__(
         norm: str | tuple = "batch",
         dropout_prob: float = 0.0,
     ) -> None:
-
         super().__init__()

         conv_type: type[nn.Conv1d | nn.Conv2d | nn.Conv3d] = Conv[Conv.CONV, spatial_dims]
diff --git a/monai/networks/nets/dints.py b/monai/networks/nets/dints.py
index 0135a544ca..90aac9eaae 100644
--- a/monai/networks/nets/dints.py
+++ b/monai/networks/nets/dints.py
@@ -568,7 +568,6 @@ def __init__(
         use_downsample: bool = True,
         device: str = "cpu",
     ):
-
         super().__init__()

         n_feats = tuple([32 * (2**_i) for _i in range(num_depths + 1)])
diff --git a/monai/networks/nets/flexible_unet.py b/monai/networks/nets/flexible_unet.py
index 6de629fc6b..fdb3376748 100644
--- a/monai/networks/nets/flexible_unet.py
+++ b/monai/networks/nets/flexible_unet.py
@@ -123,7 +123,6 @@ def __init__(
         align_corners: bool | None,
         is_pad: bool,
     ):
-
         super().__init__()
         if len(encoder_channels) < 2:
             raise ValueError("the length of `encoder_channels` should be no less than 2.")
@@ -196,7 +195,6 @@ def __init__(
         act: tuple | str | None = None,
         scale_factor: float = 1.0,
     ):
-
         conv_layer = Conv[Conv.CONV, spatial_dims](
             in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=kernel_size // 2
         )
diff --git a/monai/networks/nets/highresnet.py b/monai/networks/nets/highresnet.py
index 7dd99172e1..e71f8d193d 100644
--- a/monai/networks/nets/highresnet.py
+++ b/monai/networks/nets/highresnet.py
@@ -147,7 +147,6 @@ def __init__(
         layer_params: Sequence[dict] = DEFAULT_LAYER_PARAMS_3D,
         channel_matching: ChannelMatching | str = ChannelMatching.PAD,
     ) -> None:
-
         super().__init__()

         blocks = nn.ModuleList()
@@ -168,7 +167,7 @@ def __init__(
         )

         # residual blocks
-        for (idx, params) in enumerate(layer_params[1:-2]):  # res blocks except the 1st and last two conv layers.
+        for idx, params in enumerate(layer_params[1:-2]):  # res blocks except the 1st and last two conv layers.
             _in_chns, _out_chns = _out_chns, params["n_features"]
             _dilation = 2**idx
             for _ in range(params["repeat"]):
diff --git a/monai/networks/nets/hovernet.py b/monai/networks/nets/hovernet.py
index 4ab106f674..323e107fd7 100644
--- a/monai/networks/nets/hovernet.py
+++ b/monai/networks/nets/hovernet.py
@@ -92,7 +92,6 @@ def __init__(
             self.layers.add_module("dropout", dropout_type(dropout_prob))

     def forward(self, x: torch.Tensor) -> torch.Tensor:
-
         x1 = self.layers(x)
         if x1.shape[-1] != x.shape[-1]:
             trim = (x.shape[-1] - x1.shape[-1]) // 2
@@ -294,7 +293,6 @@ def __init__(
             self.requires_grad_(False)

     def forward(self, x: torch.Tensor) -> torch.Tensor:
-
         sc = self.shortcut(x)

         if self.shortcut.stride == (2, 2):
@@ -388,7 +386,6 @@ def __init__(
         )

     def forward(self, xin: torch.Tensor, short_cuts: list[torch.Tensor]) -> torch.Tensor:
-
         block_number = len(short_cuts) - 1
         x = xin + short_cuts[block_number]

@@ -466,7 +463,6 @@ def __init__(
         adapt_standard_resnet: bool = False,
         freeze_encoder: bool = False,
     ) -> None:
-
         super().__init__()

         if isinstance(mode, str):
@@ -576,7 +572,6 @@ def __init__(
             _load_pretrained_encoder(self, weights)

     def forward(self, x: torch.Tensor) -> dict[str, torch.Tensor]:
-
         if self.mode == HoVerNetMode.ORIGINAL.value:
             if x.shape[-1] != 270 or x.shape[-2] != 270:
                 raise ValueError("Input size should be 270 x 270 when using HoVerNetMode.ORIGINAL")
@@ -607,7 +602,6 @@ def forward(self, x: torch.Tensor) -> dict[str, torch.Tensor]:


 def _load_pretrained_encoder(model: nn.Module, state_dict: OrderedDict | dict):
-
     model_dict = model.state_dict()
     state_dict = {
         k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape)
@@ -618,7 +612,6 @@ def _load_pretrained_encoder(model: nn.Module, state_dict: OrderedDict | dict):


 def _remap_preact_resnet_model(model_url: str):
-
     pattern_conv0 = re.compile(r"^(conv0\.\/)(.+)$")
     pattern_block = re.compile(r"^(d\d+)\.(.+)$")
     pattern_layer = re.compile(r"^(.+\.d\d+)\.units\.(\d+)(.+)$")
@@ -647,7 +640,6 @@ def _remap_preact_resnet_model(model_url: str):


 def _remap_standard_resnet_model(model_url: str):
-
     pattern_conv0 = re.compile(r"^conv1\.(.+)$")
     pattern_bn1 = re.compile(r"^bn1\.(.+)$")
     pattern_block = re.compile(r"^layer(\d+)\.(\d+)\.(.+)$")
diff --git a/monai/networks/nets/milmodel.py b/monai/networks/nets/milmodel.py
index f6b905bfda..0a25b7feec 100644
--- a/monai/networks/nets/milmodel.py
+++ b/monai/networks/nets/milmodel.py
@@ -61,7 +61,6 @@ def __init__(
         trans_blocks: int = 4,
         trans_dropout: float = 0.0,
     ) -> None:
-
         super().__init__()

         if num_classes <= 0:
@@ -75,7 +74,6 @@ def __init__(
         self.transformer: nn.Module | None = None

         if backbone is None:
-
             net = models.resnet50(pretrained=pretrained)
             nfc = net.fc.in_features  # save the number of final features
             net.fc = torch.nn.Identity()  # remove final linear layer
@@ -96,7 +94,6 @@ def hook(module, input, output):
             net.layer4.register_forward_hook(forward_hook("layer4"))

         elif isinstance(backbone, str):
-
             # assume torchvision model string is provided
             torch_model = getattr(models, backbone, None)
             if torch_model is None:
@@ -137,7 +134,6 @@ def hook(module, input, output):
             self.attention = nn.Sequential(nn.Linear(nfc, 2048), nn.Tanh(), nn.Linear(2048, 1))

         elif self.mil_mode == "att_trans_pyramid":
-
             transformer_list = nn.ModuleList(
                 [
                     nn.TransformerEncoder(
@@ -174,7 +170,6 @@ def hook(module, input, output):
         self.net = net

     def calc_head(self, x: torch.Tensor) -> torch.Tensor:
-
         sh = x.shape

         if self.mil_mode == "mean":
@@ -186,7 +181,6 @@ def calc_head(self, x: torch.Tensor) -> torch.Tensor:
             x, _ = torch.max(x, dim=1)

         elif self.mil_mode == "att":
-
             a = self.attention(x)
             a = torch.softmax(a, dim=1)
             x = torch.sum(x * a, dim=1)
@@ -194,7 +188,6 @@ def calc_head(self, x: torch.Tensor) -> torch.Tensor:
             x = self.myfc(x)

         elif self.mil_mode == "att_trans" and self.transformer is not None:
-
             x = x.permute(1, 0, 2)
             x = self.transformer(x)
             x = x.permute(1, 0, 2)
@@ -206,7 +199,6 @@ def calc_head(self, x: torch.Tensor) -> torch.Tensor:
             x = self.myfc(x)

         elif self.mil_mode == "att_trans_pyramid" and self.transformer is not None:
-
             l1 = torch.mean(self.extra_outputs["layer1"], dim=(2, 3)).reshape(sh[0], sh[1], -1).permute(1, 0, 2)
             l2 = torch.mean(self.extra_outputs["layer2"], dim=(2, 3)).reshape(sh[0], sh[1], -1).permute(1, 0, 2)
             l3 = torch.mean(self.extra_outputs["layer3"], dim=(2, 3)).reshape(sh[0], sh[1], -1).permute(1, 0, 2)
@@ -233,7 +225,6 @@ def calc_head(self, x: torch.Tensor) -> torch.Tensor:
         return x

     def forward(self, x: torch.Tensor, no_head: bool = False) -> torch.Tensor:
-
         sh = x.shape
         x = x.reshape(sh[0] * sh[1], sh[2], sh[3], sh[4])

diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py
index c9e34d093c..02869d415f 100644
--- a/monai/networks/nets/resnet.py
+++ b/monai/networks/nets/resnet.py
@@ -198,7 +198,6 @@ def __init__(
         feed_forward: bool = True,
         bias_downsample: bool = True,  # for backwards compatibility (also see PR #5477)
     ) -> None:
-
         super().__init__()

         if isinstance(block, str):
@@ -268,7 +267,6 @@ def _make_layer(
         shortcut_type: str,
         stride: int = 1,
     ) -> nn.Sequential:
-
         conv_type: Callable = Conv[Conv.CONV, spatial_dims]
         norm_type: Callable = Norm[Norm.BATCH, spatial_dims]

diff --git a/monai/networks/nets/segresnet_ds.py b/monai/networks/nets/segresnet_ds.py
index 115bec455a..07f3824b51 100644
--- a/monai/networks/nets/segresnet_ds.py
+++ b/monai/networks/nets/segresnet_ds.py
@@ -152,7 +152,6 @@ def __init__(
         head_module: nn.Module | None = None,
         anisotropic_scales: tuple | None = None,
     ):
-
         super().__init__()

         if spatial_dims not in (1, 2, 3):
@@ -215,7 +214,6 @@ def __init__(
         self.spatial_dims = spatial_dims

     def _forward(self, x: torch.Tensor) -> list[torch.Tensor]:
-
         outputs = []
         x = self.conv_init(x)

@@ -273,7 +271,6 @@ def __init__(
         upsample_mode: UpsampleMode | str = "deconv",
         resolution: tuple | None = None,
     ):
-
         super().__init__()

         if spatial_dims not in (1, 2, 3):
@@ -330,7 +327,6 @@ def __init__(
         self.up_layers = nn.ModuleList()

         for i in range(n_up):
-
             filters = filters // 2
             kernel_size, _, stride = (
                 aniso_kernel(anisotropic_scales[len(blocks_up) - i - 1]) if anisotropic_scales else (3, 1, 2)
@@ -392,7 +388,6 @@ def is_valid_shape(self, x):
         return all(a)

     def _forward(self, x: torch.Tensor) -> torch.Tensor | list[torch.Tensor]:
-
         if self.preprocess is not None:
             x = self.preprocess(x)

diff --git a/monai/networks/nets/senet.py b/monai/networks/nets/senet.py
index f8ca5fefef..51435a9ea2 100644
--- a/monai/networks/nets/senet.py
+++ b/monai/networks/nets/senet.py
@@ -108,7 +108,6 @@ def __init__(
         input_3x3: bool = True,
         num_classes: int = 1000,
     ) -> None:
-
         super().__init__()

         if isinstance(block, str):
@@ -222,7 +221,6 @@ def _make_layer(
         stride: int = 1,
         downsample_kernel_size: int = 1,
     ) -> nn.Sequential:
-
         downsample = None
         if stride != 1 or self.inplanes != planes * block.expansion:
             downsample = Convolution(
diff --git a/monai/networks/nets/swin_unetr.py b/monai/networks/nets/swin_unetr.py
index faaef88514..9f8204968f 100644
--- a/monai/networks/nets/swin_unetr.py
+++ b/monai/networks/nets/swin_unetr.py
@@ -246,7 +246,6 @@ def __init__(
         self.out = UnetOutBlock(spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels)

     def load_from(self, weights):
-
         with torch.no_grad():
             self.swinViT.patch_embed.proj.weight.copy_(weights["state_dict"]["module.patch_embed.proj.weight"])
             self.swinViT.patch_embed.proj.bias.copy_(weights["state_dict"]["module.patch_embed.proj.bias"])
@@ -704,7 +703,6 @@ def __init__(self, dim: int, norm_layer: type[LayerNorm] = nn.LayerNorm, spatial
         self.norm = norm_layer(4 * dim)

     def forward(self, x):
-
         x_shape = x.size()
         if len(x_shape) == 5:
             b, d, h, w, c = x_shape
diff --git a/monai/networks/nets/transchex.py b/monai/networks/nets/transchex.py
index 8f7ad33340..31e27ffbf2 100644
--- a/monai/networks/nets/transchex.py
+++ b/monai/networks/nets/transchex.py
@@ -76,7 +76,6 @@ def from_pretrained(
             with tarfile.open(resolved_archive_file, "r:gz") as archive:

                 def is_within_directory(directory, target):
-
                     abs_directory = os.path.abspath(directory)
                     abs_target = os.path.abspath(target)

@@ -85,7 +84,6 @@ def is_within_directory(directory, target):
                     return prefix == abs_directory

                 def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
-
                     for member in tar.getmembers():
                         member_path = os.path.join(path, member.name)
                         if not is_within_directory(path, member_path):
diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py
index 8de12ae6db..a48aabf915 100644
--- a/monai/networks/nets/unet.py
+++ b/monai/networks/nets/unet.py
@@ -125,7 +125,6 @@ def __init__(
         bias: bool = True,
         adn_ordering: str = "NDA",
     ) -> None:
-
         super().__init__()

         if len(channels) < 2:
@@ -215,7 +214,6 @@ def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_
         """
         mod: nn.Module
         if self.num_res_units > 0:
-
             mod = ResidualUnit(
                 self.dimensions,
                 in_channels,
diff --git a/monai/networks/nets/varautoencoder.py b/monai/networks/nets/varautoencoder.py
index 29131281de..6cb8d6e40b 100644
--- a/monai/networks/nets/varautoencoder.py
+++ b/monai/networks/nets/varautoencoder.py
@@ -91,7 +91,6 @@ def __init__(
         bias: bool = True,
         use_sigmoid: bool = True,
     ) -> None:
-
         self.in_channels, *self.in_shape = in_shape
         self.use_sigmoid = use_sigmoid

diff --git a/monai/transforms/adaptors.py b/monai/transforms/adaptors.py
index a2f061d13d..5729740690 100644
--- a/monai/transforms/adaptors.py
+++ b/monai/transforms/adaptors.py
@@ -148,7 +148,6 @@ def map_only_names(ditems, input_map):
         return {v: ditems[k] for k, v in input_map.items()}

     def _inner(ditems):
-
         sig = FunctionSignature(function)

         if sig.found_kwargs:
@@ -218,7 +217,6 @@ def _inner(ditems):
 @_monai_export("monai.transforms")
 def apply_alias(fn, name_map):
     def _inner(data):
-
         # map names
         pre_call = dict(data)
         for _from, _to in name_map.items():
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index 774acf1e31..1980ef508b 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -1069,7 +1069,6 @@ class SavitzkyGolaySmooth(Transform):
     backend = [TransformBackends.TORCH]

     def __init__(self, window_length: int, order: int, axis: int = 1, mode: str = "zeros"):
-
         if axis < 0:
             raise ValueError("axis must be zero or positive.")

@@ -1114,7 +1113,6 @@ class DetectEnvelope(Transform):
     backend = [TransformBackends.TORCH]

     def __init__(self, axis: int = 1, n: int | None = None) -> None:
-
         if axis < 0:
             raise ValueError("axis must be zero or positive.")

@@ -1506,7 +1504,6 @@ class GibbsNoise(Transform, Fourier):
     backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

     def __init__(self, alpha: float = 0.1) -> None:
-
         if alpha > 1 or alpha < 0:
             raise ValueError("alpha must take values in the interval [0, 1].")
         self.alpha = alpha
@@ -1656,7 +1653,6 @@ class KSpaceSpikeNoise(Transform, Fourier):
     backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

     def __init__(self, loc: tuple | Sequence[tuple], k_intensity: Sequence[float] | float | None = None):
-
         self.loc = ensure_tuple(loc)
         self.k_intensity = k_intensity

@@ -1790,7 +1786,6 @@ def __init__(
         intensity_range: Sequence[Sequence[float] | float] | None = None,
         channel_wise: bool = True,
     ):
-
         self.intensity_range = intensity_range
         self.channel_wise = channel_wise
         self.sampled_k_intensity: list = []
@@ -2152,7 +2147,6 @@ class IntensityRemap(RandomizableTransform):
     """

     def __init__(self, kernel_size: int = 30, slope: float = 0.7):
-
         super().__init__()

         self.kernel_size = kernel_size
@@ -2207,7 +2201,6 @@ class RandIntensityRemap(RandomizableTransform):
     """

     def __init__(self, prob: float = 0.1, kernel_size: int = 30, slope: float = 0.7, channel_wise: bool = True):
-
         RandomizableTransform.__init__(self, prob=prob)
         self.kernel_size = kernel_size
         self.slope = slope
diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py
index a0e33557b9..790cb38671 100644
--- a/monai/transforms/intensity/dictionary.py
+++ b/monai/transforms/intensity/dictionary.py
@@ -1316,7 +1316,6 @@ def __init__(
         alpha: Sequence[float] = (0.0, 1.0),
         allow_missing_keys: bool = False,
     ) -> None:
-
         MapTransform.__init__(self, keys, allow_missing_keys)
         RandomizableTransform.__init__(self, prob=prob)
         self.rand_gibbs_noise = RandGibbsNoise(alpha=alpha, prob=1.0)
@@ -1363,12 +1362,10 @@ class GibbsNoised(MapTransform):
     backend = GibbsNoise.backend

     def __init__(self, keys: KeysCollection, alpha: float = 0.5, allow_missing_keys: bool = False) -> None:
-
         MapTransform.__init__(self, keys, allow_missing_keys)
         self.transform = GibbsNoise(alpha)

     def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]:
-
         d = dict(data)
         for key in self.key_iterator(d):
             d[key] = self.transform(d[key])
@@ -1425,7 +1422,6 @@ def __init__(
         k_intensity: Sequence[float] | float | None = None,
         allow_missing_keys: bool = False,
     ) -> None:
-
         super().__init__(keys, allow_missing_keys)
         self.transform = KSpaceSpikeNoise(loc, k_intensity)

diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py
index 2bee350eff..3cb23a541d 100644
--- a/monai/transforms/spatial/array.py
+++ b/monai/transforms/spatial/array.py
@@ -1965,7 +1965,6 @@ def __call__(self, spatial_size: Sequence[int]) -> torch.Tensor:


 class Resample(Transform):
-
     backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

     def __init__(
@@ -2867,7 +2866,6 @@ def __call__(


 class GridDistortion(Transform):
-
     backend = [TransformBackends.TORCH]

     def __init__(
@@ -2965,7 +2963,6 @@ def __call__(


 class RandGridDistortion(RandomizableTransform):
-
     backend = [TransformBackends.TORCH]

     def __init__(
diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py
index ff1565c23e..a378c5cc4f 100644
--- a/monai/transforms/spatial/dictionary.py
+++ b/monai/transforms/spatial/dictionary.py
@@ -205,7 +205,7 @@ def __init__(

     def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
         d: dict = dict(data)
-        for (key, mode, padding_mode, align_corners, dtype, dst_key) in self.key_iterator(
+        for key, mode, padding_mode, align_corners, dtype, dst_key in self.key_iterator(
             d, self.mode, self.padding_mode, self.align_corners, self.dtype, self.dst_keys
         ):
             d[key] = self.sp_transform(
@@ -278,7 +278,7 @@ def __init__(

     def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]:
         d = dict(data)
-        for (key, mode, padding_mode, align_corners, dtype) in self.key_iterator(
+        for key, mode, padding_mode, align_corners, dtype in self.key_iterator(
             d, self.mode, self.padding_mode, self.align_corners, self.dtype
         ):
             d[key] = self.resampler(
diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index 9175c37740..d1e07f7b88 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -1018,7 +1018,6 @@ def __call__(


 class ClassesToIndices(Transform):
-
     backend = [TransformBackends.NUMPY, TransformBackends.TORCH]

     def __init__(
@@ -1574,7 +1573,6 @@ class ImageFilter(Transform):
     )

     def __init__(self, filter: str | NdarrayOrTensor | nn.Module, filter_size: int | None = None, **kwargs) -> None:
-
         self._check_filter_format(filter, filter_size)
         self._check_kwargs_are_present(filter, **kwargs)
         self.filter = filter
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index 2e0bde21b4..2e2d647ef3 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -386,7 +386,6 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N


 class SplitDimd(MapTransform):
-
     backend = SplitDim.backend

     def __init__(
diff --git a/monai/transforms/utils_create_transform_ims.py b/monai/transforms/utils_create_transform_ims.py
index f040ea18aa..a98cdfe936 100644
--- a/monai/transforms/utils_create_transform_ims.py
+++ b/monai/transforms/utils_create_transform_ims.py
@@ -466,7 +466,6 @@ def create_transform_im(


 if __name__ == "__main__":
-
     keys = [CommonKeys.IMAGE, CommonKeys.LABEL]
     data = get_data(keys)
     create_transform_im(RandFlip, dict(prob=1, spatial_axis=1), data)
diff --git a/monai/utils/profiling.py b/monai/utils/profiling.py
index 059f600347..da5c0ac05c 100644
--- a/monai/utils/profiling.py
+++ b/monai/utils/profiling.py
@@ -57,7 +57,6 @@ def torch_profiler_full(func):

     @wraps(func)
     def wrapper(*args, **kwargs):
-
         with torch.autograd.profiler.profile(use_cuda=True) as prof:
             result = func(*args, **kwargs)

@@ -77,7 +76,6 @@ def torch_profiler_time_cpu_gpu(func):

     @wraps(func)
     def wrapper(*args, **kwargs):
-
         with torch.autograd.profiler.profile(use_cuda=True) as prof:
             result = func(*args, **kwargs)

@@ -103,7 +101,6 @@ def torch_profiler_time_end_to_end(func):

     @wraps(func)
     def wrapper(*args, **kwargs):
-
         torch.cuda.synchronize()
         start = perf_counter()

diff --git a/monai/visualize/utils.py b/monai/visualize/utils.py
index b74bebe4ba..f6718fe7a5 100644
--- a/monai/visualize/utils.py
+++ b/monai/visualize/utils.py
@@ -199,7 +199,6 @@ def blend_images(
         raise ValueError("image and label should have matching spatial sizes.")
     if isinstance(alpha, (np.ndarray, torch.Tensor)):
         if image.shape[1:] != alpha.shape[1:]:  # pytype: disable=attribute-error,invalid-directive
-
             raise ValueError("if alpha is image, size should match input image and label.")

     # rescale arrays to [0, 1] if desired
diff --git a/monai/visualize/visualizer.py b/monai/visualize/visualizer.py
index b267dc8f16..e7f5d9bbbe 100644
--- a/monai/visualize/visualizer.py
+++ b/monai/visualize/visualizer.py
@@ -29,7 +29,6 @@ def default_upsampler(spatial_size: Sized, align_corners: bool = False) -> Calla
     """

     def up(x):
-
         linear_mode = [InterpolateMode.LINEAR, InterpolateMode.BILINEAR, InterpolateMode.TRILINEAR]
         interp_mode = linear_mode[len(spatial_size) - 1]
         return F.interpolate(x, size=spatial_size, mode=str(interp_mode.value), align_corners=align_corners)
diff --git a/tests/min_tests.py b/tests/min_tests.py
index 5bf8179629..0eddc34c3f 100644
--- a/tests/min_tests.py
+++ b/tests/min_tests.py
@@ -210,7 +210,6 @@ def run_testsuit():


 if __name__ == "__main__":
-
     # testing import submodules
     from monai.utils.module import load_submodules

diff --git a/tests/runner.py b/tests/runner.py
index 96a1d4a5c4..7a7cc9f28f 100644
--- a/tests/runner.py
+++ b/tests/runner.py
@@ -114,7 +114,6 @@ def get_default_pattern(loader):


 if __name__ == "__main__":
-
     # Parse input arguments
     args = parse_args()

diff --git a/tests/test_anchor_box.py b/tests/test_anchor_box.py
index 3d53d79155..c29296e8ae 100644
--- a/tests/test_anchor_box.py
+++ b/tests/test_anchor_box.py
@@ -44,7 +44,6 @@ class TestAnchorGenerator(unittest.TestCase):

     @parameterized.expand(TEST_CASES_2D)
     def test_anchor_2d(self, input_param, image_shape, feature_maps_shapes):
-
         torch_anchor_utils, _ = optional_import("torchvision.models.detection.anchor_utils")
         image_list, _ = optional_import("torchvision.models.detection.image_list")

diff --git a/tests/test_auto3dseg.py b/tests/test_auto3dseg.py
index 383d55b8a3..6152b265db 100644
--- a/tests/test_auto3dseg.py
+++ b/tests/test_auto3dseg.py
@@ -150,7 +150,6 @@ class TestImageAnalyzer(Analyzer):
     """

     def __init__(self, image_key="image", stats_name="test_image"):
-
         self.image_key = image_key
         report_format = {"test_stats": None}

@@ -176,7 +175,6 @@ def setUp(self):

     @parameterized.expand(SIM_CPU_TEST_CASES)
     def test_data_analyzer_cpu(self, input_params):
-
         sim_dim = input_params["sim_dim"]
         label_key = input_params["label_key"]
         image_only = not bool(label_key)
diff --git a/tests/test_auto3dseg_hpo.py b/tests/test_auto3dseg_hpo.py
index 0b65adbd70..bc276a7f22 100644
--- a/tests/test_auto3dseg_hpo.py
+++ b/tests/test_auto3dseg_hpo.py
@@ -130,7 +130,6 @@ def setUp(self) -> None:

     @skip_if_no_cuda
     def test_run_algo(self) -> None:
-
         algo_dict = self.history[0]
         algo_name = list(algo_dict.keys())[0]
         algo = algo_dict[algo_name]
diff --git a/tests/test_bilateral_approx_cpu.py b/tests/test_bilateral_approx_cpu.py
index 04dd39b227..da30d5d7de 100644
--- a/tests/test_bilateral_approx_cpu.py
+++ b/tests/test_bilateral_approx_cpu.py
@@ -367,7 +367,6 @@ class BilateralFilterTestCaseCpuApprox(unittest.TestCase):

     @parameterized.expand(TEST_CASES)
     def test_cpu_approx(self, test_case_description, sigmas, input, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cpu")
         fast_approx = True
@@ -381,7 +380,6 @@ def test_cpu_approx(self, test_case_description, sigmas, input, expected):

     @parameterized.expand(TEST_CASES)
     def test_cpu_approx_backwards(self, test_case_description, sigmas, input, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cpu")
         fast_approx = True
diff --git a/tests/test_bilateral_approx_cuda.py b/tests/test_bilateral_approx_cuda.py
index 6ae3133719..924ff3253e 100644
--- a/tests/test_bilateral_approx_cuda.py
+++ b/tests/test_bilateral_approx_cuda.py
@@ -368,7 +368,6 @@ class BilateralFilterTestCaseCudaApprox(unittest.TestCase):

     @parameterized.expand(TEST_CASES)
     def test_cuda_approx(self, test_case_description, sigmas, input, expected):
-
         # Skip this test
         if not torch.cuda.is_available():
             return
@@ -386,7 +385,6 @@ def test_cuda_approx(self, test_case_description, sigmas, input, expected):

     @parameterized.expand(TEST_CASES)
     def test_cpu_approx_backwards(self, test_case_description, sigmas, input, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cuda")
         fast_approx = True
diff --git a/tests/test_bilateral_precise.py b/tests/test_bilateral_precise.py
index 1d2a5918d8..7fc7e06726 100644
--- a/tests/test_bilateral_precise.py
+++ b/tests/test_bilateral_precise.py
@@ -367,7 +367,6 @@ class BilateralFilterTestCaseCpuPrecise(unittest.TestCase):

     @parameterized.expand(TEST_CASES)
     def test_cpu_precise(self, test_case_description, sigmas, input, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cpu")
         fast_approx = False
@@ -381,7 +380,6 @@ def test_cpu_precise(self, test_case_description, sigmas, input, expected):

     @parameterized.expand(TEST_CASES)
     def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cpu")
         fast_approx = False
@@ -402,7 +400,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expec
 class BilateralFilterTestCaseCudaPrecise(unittest.TestCase):
     @parameterized.expand(TEST_CASES)
     def test_cuda_precise(self, test_case_description, sigmas, input, expected):
-
         # Skip this test
         if not torch.cuda.is_available():
             return
@@ -420,7 +417,6 @@ def test_cuda_precise(self, test_case_description, sigmas, input, expected):

     @parameterized.expand(TEST_CASES)
     def test_cuda_precise_backwards(self, test_case_description, sigmas, input, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cuda")
         fast_approx = False
diff --git a/tests/test_cast_to_type.py b/tests/test_cast_to_type.py
index 070549edc8..6dd994120c 100644
--- a/tests/test_cast_to_type.py
+++ b/tests/test_cast_to_type.py
@@ -39,7 +39,6 @@ class TestCastToType(unittest.TestCase):

     @parameterized.expand(TESTS)
     def test_type(self, out_dtype, input_data, expected_type):
-
         result = CastToType(dtype=out_dtype)(input_data)
         self.assertEqual(result.dtype, get_equivalent_dtype(expected_type, type(result)))

diff --git a/tests/test_compute_generalized_dice.py b/tests/test_compute_generalized_dice.py
index 2d38e5e0b1..961feb0561 100644
--- a/tests/test_compute_generalized_dice.py
+++ b/tests/test_compute_generalized_dice.py
@@ -132,7 +132,6 @@ def test_nans(self, input_data, expected_value):
     # Samplewise tests
     @parameterized.expand([TEST_CASE_1, TEST_CASE_2])
     def test_value_class(self, input_data, expected_value):
-
         # same test as for compute_meandice
         vals = {}
         vals["y_pred"] = input_data.pop("y_pred")
@@ -145,7 +144,6 @@ def test_value_class(self, input_data, expected_value):
     # Aggregation tests
     @parameterized.expand([TEST_CASE_4, TEST_CASE_5])
     def test_nans_class(self, params, input_data, expected_value):
-
         generalized_dice_score = GeneralizedDiceScore(**params)
         generalized_dice_score(**input_data)
         result = generalized_dice_score.aggregate()
diff --git a/tests/test_compute_meandice.py b/tests/test_compute_meandice.py
index 4b74e31847..8e765c6192 100644
--- a/tests/test_compute_meandice.py
+++ b/tests/test_compute_meandice.py
@@ -200,7 +200,6 @@ def test_nans(self, input_data, expected_value):
     # DiceMetric class tests
     @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_10])
     def test_value_class(self, input_data, expected_value):
-
         # same test as for compute_meandice
         vals = {}
         vals["y_pred"] = input_data.pop("y_pred")
@@ -212,7 +211,6 @@ def test_value_class(self, input_data, expected_value):

     @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8])
     def test_nans_class(self, params, input_data, expected_value):
-
         dice_metric = DiceMetric(**params)
         dice_metric(**input_data)
         result, _ = dice_metric.aggregate()
diff --git a/tests/test_compute_meaniou.py b/tests/test_compute_meaniou.py
index 68f87493a2..2ec0472b12 100644
--- a/tests/test_compute_meaniou.py
+++ b/tests/test_compute_meaniou.py
@@ -200,7 +200,6 @@ def test_nans(self, input_data, expected_value):
     # MeanIoU class tests
     @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_10])
     def test_value_class(self, input_data, expected_value):
-
         # same test as for compute_meaniou
         vals = {}
         vals["y_pred"] = input_data.pop("y_pred")
@@ -212,7 +211,6 @@ def test_value_class(self, input_data, expected_value):

     @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8])
     def test_nans_class(self, params, input_data, expected_value):
-
         iou_metric = MeanIoU(**params)
         iou_metric(**input_data)
         result, _ = iou_metric.aggregate()
diff --git a/tests/test_compute_regression_metrics.py b/tests/test_compute_regression_metrics.py
index 5cddce7d62..b0fde3afe9 100644
--- a/tests/test_compute_regression_metrics.py
+++ b/tests/test_compute_regression_metrics.py
@@ -61,7 +61,6 @@ def test_shape_reduction(self):
         for batch in batch_dims:
             for spatial in spatial_dims:
                 for base in base_dims:
-
                     # create random tensors
                     in_tensor = torch.rand((batch,) + (base,) * (spatial - 1)).to(device)

@@ -104,7 +103,6 @@ def test_compare_numpy(self):
         for batch in batch_dims:
             for spatial in spatial_dims:
                 for base in base_dims:
-
                     # create random tensors
                     in_tensor_a = torch.rand((batch,) + (base,) * (spatial - 1)).to(device)
                     in_tensor_b = torch.rand((batch,) + (base,) * (spatial - 1)).to(device)
@@ -154,7 +152,6 @@ def test_same_input(self):
         for batch in batch_dims:
             for spatial in spatial_dims:
                 for base in base_dims:
-
                     # create random tensors
                     in_tensor = torch.rand((batch,) + (base,) * (spatial - 1)).to(device)

@@ -180,7 +177,6 @@ def test_diff_input(self):
         for batch in batch_dims:
             for spatial in spatial_dims:
                 for base in base_dims:
-
                     # create random tensors
                     in_tensor_a = torch.zeros((batch,) + (base,) * (spatial - 1)).to(device)
                     in_tensor_b = torch.ones((batch,) + (base,) * (spatial - 1)).to(device)
diff --git a/tests/test_crf_cpu.py b/tests/test_crf_cpu.py
index 5f749119e7..e29a4d69eb 100644
--- a/tests/test_crf_cpu.py
+++ b/tests/test_crf_cpu.py
@@ -497,7 +497,6 @@ class CRFTestCaseCpu(unittest.TestCase):

     @parameterized.expand(TEST_CASES)
     def test(self, test_case_description, params, input, features, expected):
-
         # Create input tensors
         input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cpu"))
         feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cpu"))
diff --git a/tests/test_crf_cuda.py b/tests/test_crf_cuda.py
index 0c004b2825..8529e2e6de 100644
--- a/tests/test_crf_cuda.py
+++ b/tests/test_crf_cuda.py
@@ -498,7 +498,6 @@ class CRFTestCaseCuda(unittest.TestCase):

     @parameterized.expand(TEST_CASES)
     def test(self, test_case_description, params, input, features, expected):
-
         # Create input tensors
         input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cuda"))
         feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cuda"))
diff --git a/tests/test_crop_foregroundd.py b/tests/test_crop_foregroundd.py
index 13158ca6c2..c23c4df339 100644
--- a/tests/test_crop_foregroundd.py
+++ b/tests/test_crop_foregroundd.py
@@ -21,7 +21,6 @@

 TEST_POSITION, TESTS = [], []
 for p in TEST_NDARRAYS_ALL:
-
     TEST_POSITION.append(
         [
             {
diff --git a/tests/test_cumulative_average.py b/tests/test_cumulative_average.py
index 2373b0511c..d815d9be77 100644
--- a/tests/test_cumulative_average.py
+++ b/tests/test_cumulative_average.py
@@ -34,7 +34,6 @@ class TestAverageMeter(unittest.TestCase):

     @parameterized.expand(TEST_CASE_1)
     def test_value_all(self, data):
-
         # test orig
         self.run_test(data)

diff --git a/tests/test_cumulative_average_dist.py b/tests/test_cumulative_average_dist.py
index 9f45955544..17f4164838 100644
--- a/tests/test_cumulative_average_dist.py
+++ b/tests/test_cumulative_average_dist.py
@@ -25,7 +25,6 @@ class DistributedCumulativeAverage(DistTestCase):

     @DistCall(nnodes=1, nproc_per_node=2)
     def test_value(self):
-
         rank = dist.get_rank()
         nprocs = dist.get_world_size()
         is_cuda = dist.get_backend() == dist.Backend.NCCL
diff --git a/tests/test_dataset_summary.py b/tests/test_dataset_summary.py
index 746c3d79cf..b1cc578f32 100644
--- a/tests/test_dataset_summary.py
+++ b/tests/test_dataset_summary.py
@@ -39,7 +39,6 @@ class TestDatasetSummary(unittest.TestCase):
     def test_spacing_intensity(self):
         set_determinism(seed=0)
         with tempfile.TemporaryDirectory() as tempdir:
-
             for i in range(5):
                 im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0)
                 n = nib.Nifti1Image(im, np.eye(4))
@@ -75,7 +74,6 @@ def test_spacing_intensity(self):

     def test_anisotropic_spacing(self):
         with tempfile.TemporaryDirectory() as tempdir:
-
             pixdims = [[1.0, 1.0, 5.0], [1.0, 1.0, 4.0], [1.0, 1.0, 4.5], [1.0, 1.0, 2.0], [1.0, 1.0, 1.0]]
             for i in range(5):
                 im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0)
diff --git a/tests/test_efficientnet.py b/tests/test_efficientnet.py
index e67defa4a3..5bdad5a568 100644
--- a/tests/test_efficientnet.py
+++ b/tests/test_efficientnet.py
@@ -321,7 +321,6 @@ def test_drop_connect_layer(self):

         # testing 1D, 2D and 3D shape
         for rand_tensor_shape in [(512, 16, 4), (384, 16, 4, 4), (256, 16, 4, 4, 4)]:
-
             # test validation mode, out tensor == in tensor
             training = False
             for p in p_list:
diff --git a/tests/test_ensure_tuple.py b/tests/test_ensure_tuple.py
index 3d0ef40dd3..dc6649ec4c 100644
--- a/tests/test_ensure_tuple.py
+++ b/tests/test_ensure_tuple.py
@@ -50,5 +50,4 @@ def test_value(self, input, expected_value, wrap_array=False):


 if __name__ == "__main__":
-
     unittest.main()
diff --git a/tests/test_fg_bg_to_indicesd.py b/tests/test_fg_bg_to_indicesd.py
index 5d827b84d3..d0d1ae5fb6 100644
--- a/tests/test_fg_bg_to_indicesd.py
+++ b/tests/test_fg_bg_to_indicesd.py
@@ -20,7 +20,6 @@

 TEST_CASES = []
 for p in TEST_NDARRAYS:
-
     TEST_CASES.append(
         [
             {"keys": "label", "image_key": None, "image_threshold": 0.0, "output_shape": None},
diff --git a/tests/test_flexible_unet.py b/tests/test_flexible_unet.py
index 4c66c903aa..aae0cf729a 100644
--- a/tests/test_flexible_unet.py
+++ b/tests/test_flexible_unet.py
@@ -47,12 +47,10 @@ def get_encoder_parameters(cls):

     @classmethod
     def num_channels_per_output(cls):
-
         return [(32, 64, 128, 256, 512, 1024), (32, 64, 128, 256), (32, 64, 128, 256), (32, 64, 128, 256)]

     @classmethod
     def num_outputs(cls):
-
         return [6, 4, 4, 4]

     @classmethod
diff --git a/tests/test_generate_instance_contour.py b/tests/test_generate_instance_contour.py
index 07a9f8525c..9058855e62 100644
--- a/tests/test_generate_instance_contour.py
+++ b/tests/test_generate_instance_contour.py
@@ -48,7 +48,6 @@ class TestGenerateInstanceContour(unittest.TestCase):

     @parameterized.expand(TEST_CASE)
     def test_shape(self, in_type, test_data, min_num_points, offset, expected):
-
         inst_bbox = get_bbox(test_data[None])
         inst_map = test_data[inst_bbox[0][0] : inst_bbox[0][1], inst_bbox[0][2] : inst_bbox[0][3]]
         result = GenerateInstanceContour(min_num_points=min_num_points)(in_type(inst_map[None]), offset=offset)
diff --git a/tests/test_gmm.py b/tests/test_gmm.py
index aede44a123..4ed3b956ff 100644
--- a/tests/test_gmm.py
+++ b/tests/test_gmm.py
@@ -275,7 +275,6 @@ def tearDown(self) -> None:
     @parameterized.expand(TEST_CASES)
     @skip_if_no_cuda
     def test_cuda(self, test_case_description, mixture_count, class_count, features, labels, expected):
-
         # Device to run on
         device = torch.device("cuda")

diff --git a/tests/test_handler_classification_saver.py b/tests/test_handler_classification_saver.py
index a885cce7f7..905e326a66 100644
--- a/tests/test_handler_classification_saver.py
+++ b/tests/test_handler_classification_saver.py
@@ -28,7 +28,6 @@ class TestHandlerClassificationSaver(unittest.TestCase):

     def test_saved_content(self):
         with tempfile.TemporaryDirectory() as tempdir:
-
             # set up engine
             def _train_func(engine, batch):
                 engine.state.batch = decollate_batch(batch)
diff --git a/tests/test_handler_logfile.py b/tests/test_handler_logfile.py
index 9e8006644d..f09876ab0a 100644
--- a/tests/test_handler_logfile.py
+++ b/tests/test_handler_logfile.py
@@ -60,7 +60,6 @@ def test_filename(self):

         filename = "something_else.txt"
         with tempfile.TemporaryDirectory() as tempdir:
-
             handler = LogfileHandler(output_dir=tempdir, filename=filename)
             handler.attach(self.engine)

diff --git a/tests/test_handler_mlflow.py b/tests/test_handler_mlflow.py
index c88caae0a9..99180860a7 100644
--- a/tests/test_handler_mlflow.py
+++ b/tests/test_handler_mlflow.py
@@ -60,7 +60,6 @@ def tearDown(self):
     def test_metrics_track(self):
         experiment_param = {"backbone": "efficientnet_b0"}
         with tempfile.TemporaryDirectory() as tempdir:
-
             # set up engine
             def _train_func(engine, batch):
                 return [batch + 1.0]
diff --git a/tests/test_handler_prob_map_producer.py b/tests/test_handler_prob_map_producer.py
index 4fc44d612e..153a00b1ac 100644
--- a/tests/test_handler_prob_map_producer.py
+++ b/tests/test_handler_prob_map_producer.py
@@ -51,7 +51,6 @@ def __init__(self, name, size):
         ]

     def __getitem__(self, index):
-
         image = np.ones((3, 2, 2)) * index
         metadata = {
             ProbMapKeys.COUNT.value: self.data[index][ProbMapKeys.COUNT.value],
diff --git a/tests/test_handler_regression_metrics.py b/tests/test_handler_regression_metrics.py
index 101862ae66..a06452c54d 100644
--- a/tests/test_handler_regression_metrics.py
+++ b/tests/test_handler_regression_metrics.py
@@ -66,7 +66,6 @@ def test_compute(self):

         # iterate over all variations and check shapes for different reduction functions
         for mt_fn, mt_fn_np in zip(metrics, metrics_np):
-
             for batch in batch_dims:
                 for spatial in spatial_dims:
                     for base in base_dims:
diff --git a/tests/test_handler_tb_image.py b/tests/test_handler_tb_image.py
index 031dfe707f..8657e552f1 100644
--- a/tests/test_handler_tb_image.py
+++ b/tests/test_handler_tb_image.py
@@ -34,7 +34,6 @@ class TestHandlerTBImage(unittest.TestCase):
     @parameterized.expand(TEST_CASES)
     def test_tb_image_shape(self, shape):
         with tempfile.TemporaryDirectory() as tempdir:
-
             # set up engine
             def _train_func(engine, batch):
                 engine.state.batch = decollate_batch(list(batch))
diff --git a/tests/test_handler_tb_stats.py b/tests/test_handler_tb_stats.py index 4e7f599afa..b135dee86e 100644 --- a/tests/test_handler_tb_stats.py +++ b/tests/test_handler_tb_stats.py @@ -27,7 +27,6 @@ class TestHandlerTBStats(unittest.TestCase): def test_metrics_print(self): with tempfile.TemporaryDirectory() as tempdir: - # set up engine def _train_func(engine, batch): return [batch + 1.0] @@ -50,7 +49,6 @@ def _update_metric(engine): def test_metrics_writer(self): with tempfile.TemporaryDirectory() as tempdir: - # set up engine def _train_func(engine, batch): return [batch + 1.0] diff --git a/tests/test_hilbert_transform.py b/tests/test_hilbert_transform.py index a34bdddc93..4c49aecd8b 100644 --- a/tests/test_hilbert_transform.py +++ b/tests/test_hilbert_transform.py @@ -23,7 +23,6 @@ def create_expected_numpy_output(input_datum, **kwargs): - x = np.fft.fft(input_datum.cpu().numpy() if input_datum.device.type == "cuda" else input_datum.numpy(), **kwargs) f = np.fft.fftfreq(x.shape[kwargs["axis"]]) u = np.heaviside(f, 0.5) diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index 16914073cb..da883724a0 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -58,7 +58,6 @@ def __getitem__(self, index): def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", num_workers=10): - monai.config.print_config() # define transforms for image and classification train_transforms = Compose( diff --git a/tests/test_inverse.py b/tests/test_inverse.py index c081d38dfe..0423c80d6b 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -72,7 +72,6 @@ from tests.utils import make_nifti_image, make_rand_affine if TYPE_CHECKING: - has_nib = True else: _, has_nib = optional_import("nibabel") @@ -454,7 +453,6 @@ def test_inverse(self, _, data_name, acceptable_diff, is_meta, *transforms): # skip this test if multiprocessing uses 'spawn', as the check is only basic anyway @skipUnless(torch.multiprocessing.get_start_method() == "spawn", "requires spawn") def test_fail(self): - t1 = SpatialPadd("image", [10, 5]) data = t1(self.all_data["2D"]) @@ -465,7 +463,6 @@ def test_fail(self): @parameterized.expand(N_SAMPLES_TESTS) def test_inverse_inferred_seg(self, extra_transform): - test_data = [] for _ in range(20): image, label = create_test_image_2d(100, 101) diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py index aa8266710d..05e296e6b9 100644 --- a/tests/test_inverse_collation.py +++ b/tests/test_inverse_collation.py @@ -46,7 +46,6 @@ from tests.utils import make_nifti_image if TYPE_CHECKING: - has_nib = True else: _, has_nib = optional_import("nibabel") diff --git a/tests/test_k_space_spike_noise.py b/tests/test_k_space_spike_noise.py index cc0a4932af..4d820573a6 100644 --- a/tests/test_k_space_spike_noise.py +++ b/tests/test_k_space_spike_noise.py @@ -47,7 +47,6 @@ def get_data(im_shape, im_type): @parameterized.expand(TESTS) def test_same_result(self, im_shape, im_type, k_intensity): - im = self.get_data(im_shape, im_type) loc = [0, int(im.shape[1] / 2), 0] if len(im_shape) == 2 else [0, int(im.shape[1] / 2), 0, 0] t = KSpaceSpikeNoise(loc, k_intensity) @@ -63,7 +62,6 @@ def test_same_result(self, im_shape, im_type, k_intensity): @parameterized.expand(TESTS) def test_highlighted_kspace_pixel(self, im_shape, as_tensor_input, k_intensity): - im = self.get_data(im_shape, as_tensor_input) loc = [0, int(im.shape[1] / 2), 0] if len(im_shape) == 2 
else [0, int(im.shape[1] / 2), 0, 0] t = KSpaceSpikeNoise(loc, k_intensity) diff --git a/tests/test_k_space_spike_noised.py b/tests/test_k_space_spike_noised.py index 8cd42d7c08..76a79d4b12 100644 --- a/tests/test_k_space_spike_noised.py +++ b/tests/test_k_space_spike_noised.py @@ -49,7 +49,6 @@ def get_data(im_shape, im_type): @parameterized.expand(TESTS) def test_same_result(self, im_shape, im_type): - data = self.get_data(im_shape, im_type) loc = [0] + [int(im_shape[i] / 2) for i in range(len(im_shape))] k_intensity = 10 @@ -66,7 +65,6 @@ def test_same_result(self, im_shape, im_type): @parameterized.expand(TESTS) def test_highlighted_kspace_pixel(self, im_shape, im_type): - data = self.get_data(im_shape, im_type) loc = [0] + [int(im_shape[i] / 2) for i in range(len(im_shape))] k_intensity = 10 diff --git a/tests/test_lr_finder.py b/tests/test_lr_finder.py index 4472a75716..c10016eeff 100644 --- a/tests/test_lr_finder.py +++ b/tests/test_lr_finder.py @@ -49,7 +49,6 @@ @unittest.skipUnless(has_pil, "requires PIL") class TestLRFinder(unittest.TestCase): def setUp(self): - self.root_dir = MONAIEnvVars.data_dir() if not self.root_dir: self.root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") diff --git a/tests/test_masked_inference_wsi_dataset.py b/tests/test_masked_inference_wsi_dataset.py index e9cd6724ab..bb90f7900b 100644 --- a/tests/test_masked_inference_wsi_dataset.py +++ b/tests/test_masked_inference_wsi_dataset.py @@ -40,7 +40,6 @@ def prepare_data(*masks): - mask = np.zeros((HEIGHT // 2, WIDTH // 2)) mask[100, 100] = 1 np.save(masks[0], mask) diff --git a/tests/test_mlp.py b/tests/test_mlp.py index a9010b2944..8ad66ebc6e 100644 --- a/tests/test_mlp.py +++ b/tests/test_mlp.py @@ -24,7 +24,6 @@ for dropout_rate in np.linspace(0, 1, 4): for hidden_size in [128, 256, 512, 768]: for mlp_dim in [0, 1028, 2048, 3072]: - test_case = [ {"hidden_size": hidden_size, "mlp_dim": mlp_dim, "dropout_rate": dropout_rate}, (2, 512, hidden_size), diff --git a/tests/test_network_consistency.py b/tests/test_network_consistency.py index 2ca8c5a4b0..948e4d0615 100644 --- a/tests/test_network_consistency.py +++ b/tests/test_network_consistency.py @@ -49,7 +49,6 @@ def tearDown(self): ) @parameterized.expand(TESTS, skip_on_empty=True) def test_network_consistency(self, net_name, data_path, json_path): - print("Net name: " + net_name) print("Data path: " + data_path) print("JSON path: " + json_path) diff --git a/tests/test_nifti_endianness.py b/tests/test_nifti_endianness.py index 0e00e6077d..2539d95fd5 100644 --- a/tests/test_nifti_endianness.py +++ b/tests/test_nifti_endianness.py @@ -53,7 +53,6 @@ def setUp(self): @parameterized.expand(TESTS) @skipUnless(has_nib, "Requires NiBabel") def test_endianness(self, endianness, use_array, image_only): - hdr = nib.Nifti1Header(endianness=endianness) nii = nib.Nifti1Image(self.im, np.eye(4), header=hdr) nib.save(nii, self.fname) diff --git a/tests/test_pad_collation.py b/tests/test_pad_collation.py index 0af9d49b39..cd98f29abf 100644 --- a/tests/test_pad_collation.py +++ b/tests/test_pad_collation.py @@ -87,7 +87,6 @@ def tearDown(self) -> None: @parameterized.expand(TESTS) def test_pad_collation(self, t_type, collate_method, transform): - if t_type == dict: dataset = CacheDataset(self.dict_data, transform, progress=False) else: diff --git a/tests/test_phl_cpu.py b/tests/test_phl_cpu.py index a558ba2827..98a5018d8e 100644 --- a/tests/test_phl_cpu.py +++ b/tests/test_phl_cpu.py @@ -244,7 +244,6 @@ class 
PHLFilterTestCaseCpu(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu(self, test_case_description, sigmas, input, features, expected): - # Create input tensors input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cpu")) feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cpu")) diff --git a/tests/test_phl_cuda.py b/tests/test_phl_cuda.py index 97c3db6c70..0ddfd5eaae 100644 --- a/tests/test_phl_cuda.py +++ b/tests/test_phl_cuda.py @@ -152,7 +152,6 @@ class PHLFilterTestCaseCuda(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cuda(self, test_case_description, sigmas, input, features, expected): - # Create input tensors input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cuda")) feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cuda")) diff --git a/tests/test_rand_k_space_spike_noised.py b/tests/test_rand_k_space_spike_noised.py index 37e8ebcf81..3e1c11b2d9 100644 --- a/tests/test_rand_k_space_spike_noised.py +++ b/tests/test_rand_k_space_spike_noised.py @@ -46,7 +46,6 @@ def get_data(im_shape, im_type): @parameterized.expand(TESTS) def test_same_result(self, im_shape, im_type): - data = self.get_data(im_shape, im_type) t = RandKSpaceSpikeNoised(KEYS, prob=1.0, intensity_range=(13, 15), channel_wise=True) diff --git a/tests/test_segresnet_ds.py b/tests/test_segresnet_ds.py index e8382f7079..a5b88f9724 100644 --- a/tests/test_segresnet_ds.py +++ b/tests/test_segresnet_ds.py @@ -81,7 +81,6 @@ def test_shape(self, input_param, input_shape, expected_shape): @parameterized.expand(TEST_CASE_SEGRESNET_DS2) def test_shape2(self, input_param, input_shape, expected_shape): - dsdepth = input_param.get("dsdepth", 1) net = SegResNetDS(**input_param).to(device) @@ -107,7 +106,6 @@ def test_shape2(self, input_param, input_shape, expected_shape): @parameterized.expand(TEST_CASE_SEGRESNET_DS3) def test_shape3(self, input_param, input_shape, expected_shapes): - dsdepth = input_param.get("dsdepth", 1) net = SegResNetDS(**input_param).to(device) diff --git a/tests/test_selfattention.py b/tests/test_selfattention.py index 08eac70e51..926ef7da55 100644 --- a/tests/test_selfattention.py +++ b/tests/test_selfattention.py @@ -28,7 +28,6 @@ for dropout_rate in np.linspace(0, 1, 4): for hidden_size in [360, 480, 600, 768]: for num_heads in [4, 6, 8, 12]: - test_case = [ {"hidden_size": hidden_size, "num_heads": num_heads, "dropout_rate": dropout_rate}, (2, 512, hidden_size), diff --git a/tests/test_squeezedim.py b/tests/test_squeezedim.py index 9f08af540f..6673fd25c1 100644 --- a/tests/test_squeezedim.py +++ b/tests/test_squeezedim.py @@ -34,7 +34,6 @@ class TestSqueezeDim(unittest.TestCase): @parameterized.expand(TESTS) def test_shape(self, input_param, test_data, expected_shape): - result = SqueezeDim(**input_param)(test_data) self.assertTupleEqual(result.shape, expected_shape) if "dim" in input_param and input_param["dim"] == 2 and isinstance(result, MetaTensor): @@ -42,7 +41,6 @@ def test_shape(self, input_param, test_data, expected_shape): @parameterized.expand(TESTS_FAIL) def test_invalid_inputs(self, exception, input_param, test_data): - with self.assertRaises(exception): SqueezeDim(**input_param)(test_data) diff --git a/tests/test_state_cacher.py b/tests/test_state_cacher.py index 6cb404b976..2037dc3951 100644 --- a/tests/test_state_cacher.py +++ b/tests/test_state_cacher.py @@ -38,7 +38,6 @@ class 
TestStateCacher(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_state_cacher(self, data_obj, params): - key = "data_obj" state_cacher = StateCacher(**params) diff --git a/tests/test_tile_on_grid.py b/tests/test_tile_on_grid.py index 714b65f98c..06b5fe65f0 100644 --- a/tests/test_tile_on_grid.py +++ b/tests/test_tile_on_grid.py @@ -77,7 +77,6 @@ def make_image( seed=123, **kwargs, ): - tile_count = int(np.sqrt(tile_count)) pad = 0 if random_offset: @@ -117,7 +116,6 @@ def make_image( class TestTileOnGrid(unittest.TestCase): @parameterized.expand(TESTS) def test_tile_patch_single_call(self, in_type, input_parameters): - img, tiles = make_image(**input_parameters) input_img = in_type(img) @@ -127,7 +125,6 @@ def test_tile_patch_single_call(self, in_type, input_parameters): @parameterized.expand(TESTS2) def test_tile_patch_random_call(self, in_type, input_parameters): - img, tiles = make_image(**input_parameters, seed=123) input_img = in_type(img) diff --git a/tests/test_tile_on_grid_dict.py b/tests/test_tile_on_grid_dict.py index cb824ee2e6..bb8689fd3b 100644 --- a/tests/test_tile_on_grid_dict.py +++ b/tests/test_tile_on_grid_dict.py @@ -86,7 +86,6 @@ def make_image( seed=123, **kwargs, ): - tile_count = int(np.sqrt(tile_count)) pad = 0 if random_offset: @@ -126,7 +125,6 @@ def make_image( class TestTileOnGridDict(unittest.TestCase): @parameterized.expand(TESTS) def test_tile_patch_single_call(self, in_type, input_parameters): - key = "image" input_parameters["keys"] = key @@ -149,7 +147,6 @@ def test_tile_patch_single_call(self, in_type, input_parameters): @parameterized.expand(TESTS2) def test_tile_patch_random_call(self, in_type, input_parameters): - key = "image" input_parameters["keys"] = key diff --git a/tests/test_trainable_bilateral.py b/tests/test_trainable_bilateral.py index 1300e5068d..43b628be80 100644 --- a/tests/test_trainable_bilateral.py +++ b/tests/test_trainable_bilateral.py @@ -275,7 +275,6 @@ class BilateralFilterTestCaseCpuPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") @@ -302,7 +301,6 @@ def test_cpu_precise(self, test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") @@ -375,7 +373,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expec class BilateralFilterTestCaseCudaPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cuda_precise(self, test_case_description, sigmas, input, expected): - # Skip this test if not torch.cuda.is_available(): return @@ -406,7 +403,6 @@ def test_cuda_precise(self, test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cuda_precise_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cuda") diff --git a/tests/test_trainable_joint_bilateral.py b/tests/test_trainable_joint_bilateral.py index a8725dbf17..a42510b7c6 100644 --- a/tests/test_trainable_joint_bilateral.py +++ b/tests/test_trainable_joint_bilateral.py @@ -359,7 +359,6 @@ class JointBilateralFilterTestCaseCpuPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, guide, 
expected): - # Params to determine the implementation to test device = torch.device("cpu") @@ -389,7 +388,6 @@ def test_cpu_precise(self, test_case_description, sigmas, input, guide, expected @parameterized.expand(TEST_CASES) def test_cpu_precise_backwards(self, test_case_description, sigmas, input, guide, expected): - # Params to determine the implementation to test device = torch.device("cpu") @@ -484,7 +482,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, guide class JointBilateralFilterTestCaseCudaPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cuda_precise(self, test_case_description, sigmas, input, guide, expected): - # Skip this test if not torch.cuda.is_available(): return @@ -518,7 +515,6 @@ def test_cuda_precise(self, test_case_description, sigmas, input, guide, expecte @parameterized.expand(TEST_CASES) def test_cuda_precise_backwards(self, test_case_description, sigmas, input, guide, expected): - # Params to determine the implementation to test device = torch.device("cuda") diff --git a/tests/test_transformerblock.py b/tests/test_transformerblock.py index 2650367886..f1a20b842c 100644 --- a/tests/test_transformerblock.py +++ b/tests/test_transformerblock.py @@ -25,7 +25,6 @@ for hidden_size in [360, 480, 600, 768]: for num_heads in [4, 8, 12]: for mlp_dim in [1024, 3072]: - test_case = [ { "hidden_size": hidden_size, diff --git a/tests/testing_data/integration_answers.py b/tests/testing_data/integration_answers.py index 8bf6b1e368..f4a5483f83 100644 --- a/tests/testing_data/integration_answers.py +++ b/tests/testing_data/integration_answers.py @@ -644,7 +644,7 @@ def test_integration_value(test_name, key, data, rtol=1e-2): - for (idx, expected) in enumerate(EXPECTED_ANSWERS): + for idx, expected in enumerate(EXPECTED_ANSWERS): if test_name not in expected: continue if key not in expected[test_name]: diff --git a/tests/utils.py b/tests/utils.py index 280b848806..2f4b6d81ac 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -575,7 +575,6 @@ def run_process(func, args, kwargs, results): results.put(e) def __call__(self, obj): - if self.skip_timing: return obj From f7963819c79f7461002219082e89c7b077e26081 Mon Sep 17 00:00:00 2001 From: Carlotta Date: Sun, 5 Feb 2023 12:35:27 +0100 Subject: [PATCH 02/68] initial commit, reformatted original monai files --- monai/apps/auto3dseg/auto_runner.py | 2 - monai/apps/auto3dseg/bundle_gen.py | 3 - monai/apps/auto3dseg/data_analyzer.py | 2 - monai/apps/auto3dseg/ensemble_builder.py | 2 - monai/apps/auto3dseg/hpo_gen.py | 2 - monai/apps/deepedit/interaction.py | 1 - monai/apps/deepedit/transforms.py | 1 - monai/apps/deepgrow/interaction.py | 1 - monai/apps/detection/utils/anchor_utils.py | 1 - monai/apps/nuclick/transforms.py | 1 - monai/apps/pathology/metrics/lesion_froc.py | 1 - monai/apps/pathology/transforms/post/array.py | 1 - .../pathology/transforms/spatial/array.py | 2 - .../transforms/spatial/dictionary.py | 1 - .../reconstruction/transforms/dictionary.py | 1 - monai/auto3dseg/analyzer.py | 4 - monai/auto3dseg/seg_summarizer.py | 1 - monai/config/deviceconfig.py | 1 - monai/data/image_reader.py | 2 +- monai/data/thread_buffer.py | 1 - monai/data/utils.py | 1 - monai/engines/evaluator.py | 1 - monai/losses/ds_loss.py | 1 - monai/losses/quicknatLoss.py | 179 ++++++++++++++++++ monai/losses/ssim_loss.py | 1 - monai/metrics/regression.py | 1 - monai/networks/blocks/crf.py | 1 - monai/networks/blocks/denseblock.py | 1 - monai/networks/blocks/dynunet_block.py | 1 - 
monai/networks/blocks/segresnet_block.py | 2 - .../networks/blocks/squeeze_and_excitation.py | 3 - monai/networks/layers/filtering.py | 1 - monai/networks/layers/simplelayers.py | 3 - monai/networks/nets/ahnet.py | 8 +- monai/networks/nets/autoencoder.py | 1 - monai/networks/nets/densenet.py | 1 - monai/networks/nets/dints.py | 1 - monai/networks/nets/flexible_unet.py | 2 - monai/networks/nets/highresnet.py | 3 +- monai/networks/nets/hovernet.py | 8 - monai/networks/nets/milmodel.py | 9 - monai/networks/nets/resnet.py | 2 - monai/networks/nets/segresnet_ds.py | 5 - monai/networks/nets/senet.py | 2 - monai/networks/nets/swin_unetr.py | 2 - monai/networks/nets/transchex.py | 2 - monai/networks/nets/unet.py | 2 - monai/networks/nets/varautoencoder.py | 1 - monai/transforms/adaptors.py | 2 - monai/transforms/intensity/array.py | 7 - monai/transforms/intensity/dictionary.py | 4 - monai/transforms/spatial/array.py | 3 - monai/transforms/spatial/dictionary.py | 4 +- monai/transforms/utility/array.py | 2 - monai/transforms/utility/dictionary.py | 1 - .../transforms/utils_create_transform_ims.py | 1 - monai/utils/profiling.py | 3 - monai/visualize/utils.py | 1 - monai/visualize/visualizer.py | 1 - tests/min_tests.py | 1 - tests/runner.py | 1 - tests/test_anchor_box.py | 1 - tests/test_auto3dseg.py | 2 - tests/test_auto3dseg_hpo.py | 1 - tests/test_bilateral_approx_cpu.py | 2 - tests/test_bilateral_approx_cuda.py | 2 - tests/test_bilateral_precise.py | 4 - tests/test_cast_to_type.py | 1 - tests/test_compute_generalized_dice.py | 2 - tests/test_compute_meandice.py | 2 - tests/test_compute_meaniou.py | 2 - tests/test_compute_regression_metrics.py | 4 - tests/test_crf_cpu.py | 1 - tests/test_crf_cuda.py | 1 - tests/test_crop_foregroundd.py | 1 - tests/test_cumulative_average.py | 1 - tests/test_cumulative_average_dist.py | 1 - tests/test_dataset_summary.py | 2 - tests/test_efficientnet.py | 1 - tests/test_ensure_tuple.py | 1 - tests/test_fg_bg_to_indicesd.py | 1 - tests/test_flexible_unet.py | 2 - tests/test_generate_instance_contour.py | 1 - tests/test_gmm.py | 1 - tests/test_handler_classification_saver.py | 1 - tests/test_handler_logfile.py | 1 - tests/test_handler_mlflow.py | 1 - tests/test_handler_prob_map_producer.py | 1 - tests/test_handler_regression_metrics.py | 1 - tests/test_handler_tb_image.py | 1 - tests/test_handler_tb_stats.py | 2 - tests/test_hilbert_transform.py | 1 - tests/test_integration_classification_2d.py | 1 - tests/test_inverse.py | 3 - tests/test_inverse_collation.py | 1 - tests/test_k_space_spike_noise.py | 2 - tests/test_k_space_spike_noised.py | 2 - tests/test_lr_finder.py | 1 - tests/test_masked_inference_wsi_dataset.py | 1 - tests/test_mlp.py | 1 - tests/test_network_consistency.py | 1 - tests/test_nifti_endianness.py | 1 - tests/test_pad_collation.py | 1 - tests/test_phl_cpu.py | 1 - tests/test_phl_cuda.py | 1 - tests/test_rand_k_space_spike_noised.py | 1 - tests/test_segresnet_ds.py | 2 - tests/test_selfattention.py | 1 - tests/test_squeezedim.py | 2 - tests/test_state_cacher.py | 1 - tests/test_tile_on_grid.py | 3 - tests/test_tile_on_grid_dict.py | 3 - tests/test_trainable_bilateral.py | 4 - tests/test_trainable_joint_bilateral.py | 4 - tests/test_transformerblock.py | 1 - tests/testing_data/integration_answers.py | 2 +- tests/utils.py | 1 - 117 files changed, 186 insertions(+), 209 deletions(-) create mode 100644 monai/losses/quicknatLoss.py diff --git a/monai/apps/auto3dseg/auto_runner.py b/monai/apps/auto3dseg/auto_runner.py index 2d83b0690f..60b58fca27 100644 --- 
a/monai/apps/auto3dseg/auto_runner.py +++ b/monai/apps/auto3dseg/auto_runner.py @@ -223,7 +223,6 @@ def __init__( templates_path_or_url: str | None = None, **kwargs, ): - logger.info(f"AutoRunner using work directory {work_dir}") os.makedirs(work_dir, exist_ok=True) @@ -641,7 +640,6 @@ def run(self): # step 2: algorithm generation if self.algo_gen: - if not os.path.isfile(self.datastats_filename): raise ValueError( f"Could not find the datastats file {self.datastats_filename}. " diff --git a/monai/apps/auto3dseg/bundle_gen.py b/monai/apps/auto3dseg/bundle_gen.py index 397244a73a..54af2b99d2 100644 --- a/monai/apps/auto3dseg/bundle_gen.py +++ b/monai/apps/auto3dseg/bundle_gen.py @@ -377,9 +377,7 @@ def __init__( data_stats_filename: str | None = None, data_src_cfg_name: str | None = None, ): - if algos is None or isinstance(algos, (list, tuple, str)): - if templates_path_or_url is None: templates_path_or_url = default_algo_zip @@ -404,7 +402,6 @@ def __init__( self.algos: Any = [] if isinstance(algos, dict): for algo_name, algo_params in algos.items(): - template_path = os.path.dirname(algo_params.get("template_path", ".")) if len(template_path) > 0 and template_path not in sys.path: sys.path.append(template_path) diff --git a/monai/apps/auto3dseg/data_analyzer.py b/monai/apps/auto3dseg/data_analyzer.py index ffa52f6646..9176ede626 100644 --- a/monai/apps/auto3dseg/data_analyzer.py +++ b/monai/apps/auto3dseg/data_analyzer.py @@ -214,7 +214,6 @@ def get_all_case_stats(self, key="training", transform_list=None): Orientationd(keys=keys, axcodes="RAS"), ] if self.label_key is not None: - allowed_shape_difference = self.extra_params.pop("allowed_shape_difference", 5) transform_list.append( EnsureSameShaped( @@ -235,7 +234,6 @@ def get_all_case_stats(self, key="training", transform_list=None): warnings.warn("tqdm is not installed. 
not displaying the caching progress.") for batch_data in tqdm(dataloader) if has_tqdm else dataloader: - batch_data = batch_data[0] batch_data[self.image_key] = batch_data[self.image_key].to(self.device) diff --git a/monai/apps/auto3dseg/ensemble_builder.py b/monai/apps/auto3dseg/ensemble_builder.py index 224b412145..548911d0c7 100644 --- a/monai/apps/auto3dseg/ensemble_builder.py +++ b/monai/apps/auto3dseg/ensemble_builder.py @@ -180,7 +180,6 @@ class AlgoEnsembleBestN(AlgoEnsemble): """ def __init__(self, n_best: int = 5): - super().__init__() self.n_best = n_best @@ -225,7 +224,6 @@ class AlgoEnsembleBestByFold(AlgoEnsemble): """ def __init__(self, n_fold: int = 5): - super().__init__() self.n_fold = n_fold diff --git a/monai/apps/auto3dseg/hpo_gen.py b/monai/apps/auto3dseg/hpo_gen.py index 922c93790f..ec18d897b8 100644 --- a/monai/apps/auto3dseg/hpo_gen.py +++ b/monai/apps/auto3dseg/hpo_gen.py @@ -197,7 +197,6 @@ def generate(self, output_folder: str = ".") -> None: if isinstance(self.algo, BundleAlgo): self.algo.export_to_disk(output_folder, task_prefix + task_id, fill_with_datastats=False) else: - ConfigParser.export_config_file(self.params, write_path) logger.info(write_path) @@ -376,7 +375,6 @@ def generate(self, output_folder: str = ".") -> None: if isinstance(self.algo, BundleAlgo): self.algo.export_to_disk(output_folder, task_prefix + task_id, fill_with_datastats=False) else: - ConfigParser.export_config_file(self.params, write_path) logger.info(write_path) diff --git a/monai/apps/deepedit/interaction.py b/monai/apps/deepedit/interaction.py index 05623cf248..4c5bd3ec2b 100644 --- a/monai/apps/deepedit/interaction.py +++ b/monai/apps/deepedit/interaction.py @@ -51,7 +51,6 @@ def __init__( click_probability_key: str = "probability", max_interactions: int = 1, ) -> None: - self.deepgrow_probability = deepgrow_probability self.transforms = Compose(transforms) if not isinstance(transforms, Compose) else transforms self.train = train diff --git a/monai/apps/deepedit/transforms.py b/monai/apps/deepedit/transforms.py index 89790b2624..0e2a0d037d 100644 --- a/monai/apps/deepedit/transforms.py +++ b/monai/apps/deepedit/transforms.py @@ -529,7 +529,6 @@ def find_guidance(self, discrepancy): return None def add_guidance(self, guidance, discrepancy, label_names, labels): - # Positive clicks of the segment in the iteration pos_discr = discrepancy[0] # idx 0 is positive discrepancy and idx 1 is negative discrepancy diff --git a/monai/apps/deepgrow/interaction.py b/monai/apps/deepgrow/interaction.py index c134d45d22..41ebd2fbe7 100644 --- a/monai/apps/deepgrow/interaction.py +++ b/monai/apps/deepgrow/interaction.py @@ -46,7 +46,6 @@ def __init__( train: bool, key_probability: str = "probability", ) -> None: - if not isinstance(transforms, Compose): transforms = Compose(transforms) diff --git a/monai/apps/detection/utils/anchor_utils.py b/monai/apps/detection/utils/anchor_utils.py index 379c972f6d..12bf90585d 100644 --- a/monai/apps/detection/utils/anchor_utils.py +++ b/monai/apps/detection/utils/anchor_utils.py @@ -373,7 +373,6 @@ def __init__( | Sequence[Sequence[float]] = ((32, 32, 32), (48, 20, 20), (20, 48, 20), (20, 20, 48)), indexing: str = "ij", ) -> None: - nn.Module.__init__(self) spatial_dims = len(base_anchor_shapes[0]) diff --git a/monai/apps/nuclick/transforms.py b/monai/apps/nuclick/transforms.py index a9fbaa3144..7ec416dabb 100644 --- a/monai/apps/nuclick/transforms.py +++ b/monai/apps/nuclick/transforms.py @@ -151,7 +151,6 @@ def __init__( others_value: int = 0, to_binary_mask: 
bool = True, ): - super().__init__(keys, allow_missing_keys=False) self.others = others self.mask_value = mask_value diff --git a/monai/apps/pathology/metrics/lesion_froc.py b/monai/apps/pathology/metrics/lesion_froc.py index 67e8a8510a..d216fa1d2e 100644 --- a/monai/apps/pathology/metrics/lesion_froc.py +++ b/monai/apps/pathology/metrics/lesion_froc.py @@ -74,7 +74,6 @@ def __init__( nms_box_size: int = 48, image_reader_name: str = "cuCIM", ) -> None: - self.data = data self.grow_distance = grow_distance self.itc_diameter = itc_diameter diff --git a/monai/apps/pathology/transforms/post/array.py b/monai/apps/pathology/transforms/post/array.py index ba005706a5..5289dc101c 100644 --- a/monai/apps/pathology/transforms/post/array.py +++ b/monai/apps/pathology/transforms/post/array.py @@ -252,7 +252,6 @@ class GenerateDistanceMap(Transform): backend = [TransformBackends.NUMPY] def __init__(self, smooth_fn: Callable | None = None, dtype: DtypeLike = np.float32) -> None: - self.smooth_fn = smooth_fn if smooth_fn is not None else GaussianSmooth() self.dtype = dtype diff --git a/monai/apps/pathology/transforms/spatial/array.py b/monai/apps/pathology/transforms/spatial/array.py index 9733156c85..ea8a8c89a9 100644 --- a/monai/apps/pathology/transforms/spatial/array.py +++ b/monai/apps/pathology/transforms/spatial/array.py @@ -165,7 +165,6 @@ def __init__( raise ValueError("Unsupported filter_mode, must be [min, max or random]: " + str(self.filter_mode)) def randomize(self, img_size: Sequence[int]) -> None: - c, h, w = img_size self.offset = (0, 0) @@ -239,7 +238,6 @@ def __call__(self, image: NdarrayOrTensor) -> NdarrayOrTensor: else: if len(img_np) > self.tile_count: - if self.filter_mode == "min": # default, keep non-background tiles (smallest values) idxs = np.argsort(img_np.sum(axis=(1, 2, 3)))[: self.tile_count] diff --git a/monai/apps/pathology/transforms/spatial/dictionary.py b/monai/apps/pathology/transforms/spatial/dictionary.py index a304ea6d62..8166a5891d 100644 --- a/monai/apps/pathology/transforms/spatial/dictionary.py +++ b/monai/apps/pathology/transforms/spatial/dictionary.py @@ -121,7 +121,6 @@ def randomize(self, data: Any = None) -> None: def __call__( self, data: Mapping[Hashable, NdarrayOrTensor] ) -> dict[Hashable, NdarrayOrTensor] | list[dict[Hashable, NdarrayOrTensor]]: - self.randomize() d = dict(data) diff --git a/monai/apps/reconstruction/transforms/dictionary.py b/monai/apps/reconstruction/transforms/dictionary.py index f475b9870d..11454b0b6b 100644 --- a/monai/apps/reconstruction/transforms/dictionary.py +++ b/monai/apps/reconstruction/transforms/dictionary.py @@ -213,7 +213,6 @@ class ReferenceBasedSpatialCropd(Cropd): """ def __init__(self, keys: KeysCollection, ref_key: str, allow_missing_keys: bool = False) -> None: - super().__init__(keys, cropper=None, allow_missing_keys=allow_missing_keys) # type: ignore self.ref_key = ref_key diff --git a/monai/auto3dseg/analyzer.py b/monai/auto3dseg/analyzer.py index af24b5c907..a0eeb0a9ca 100644 --- a/monai/auto3dseg/analyzer.py +++ b/monai/auto3dseg/analyzer.py @@ -199,7 +199,6 @@ class ImageStats(Analyzer): """ def __init__(self, image_key: str, stats_name: str = "image_stats") -> None: - if not isinstance(image_key, str): raise ValueError("image_key input must be str") @@ -292,7 +291,6 @@ class FgImageStats(Analyzer): """ def __init__(self, image_key: str, label_key: str, stats_name: str = "image_foreground_stats"): - self.image_key = image_key self.label_key = label_key @@ -375,7 +373,6 @@ class LabelStats(Analyzer): """ def 
__init__(self, image_key: str, label_key: str, stats_name: str = "label_stats", do_ccp: bool | None = True): - self.image_key = image_key self.label_key = label_key self.do_ccp = do_ccp @@ -857,7 +854,6 @@ def __init__( hist_bins: list[int] | int | None = None, hist_range: list | None = None, ): - self.image_key = image_key # set defaults diff --git a/monai/auto3dseg/seg_summarizer.py b/monai/auto3dseg/seg_summarizer.py index 22db58f76e..deeafa212e 100644 --- a/monai/auto3dseg/seg_summarizer.py +++ b/monai/auto3dseg/seg_summarizer.py @@ -89,7 +89,6 @@ def __init__( hist_range: list | None = None, histogram_only: bool = False, ) -> None: - self.image_key = image_key self.label_key = label_key # set defaults diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py index 16620b53c7..7567c9c7b0 100644 --- a/monai/config/deviceconfig.py +++ b/monai/config/deviceconfig.py @@ -191,7 +191,6 @@ def print_system_info(file=sys.stdout) -> None: def get_gpu_info() -> OrderedDict: - output: OrderedDict = OrderedDict() num_gpus = torch.cuda.device_count() diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py index c1cfcfd8ca..d03028d198 100644 --- a/monai/data/image_reader.py +++ b/monai/data/image_reader.py @@ -616,7 +616,7 @@ def get_data(self, data) -> tuple[np.ndarray, dict]: img_array: list[np.ndarray] = [] compatible_meta: dict = {} - for (data_array, metadata) in ensure_tuple(dicom_data): + for data_array, metadata in ensure_tuple(dicom_data): img_array.append(np.ascontiguousarray(np.swapaxes(data_array, 0, 1) if self.swap_ij else data_array)) affine = self._get_affine(metadata, self.affine_lps_to_ras) metadata[MetaKeys.SPACE] = SpaceKeys.RAS if self.affine_lps_to_ras else SpaceKeys.LPS diff --git a/monai/data/thread_buffer.py b/monai/data/thread_buffer.py index e86a4043a2..fc7826fb15 100644 --- a/monai/data/thread_buffer.py +++ b/monai/data/thread_buffer.py @@ -66,7 +66,6 @@ def stop(self): self.gen_thread = None def __iter__(self): - self.is_running = True self.gen_thread = Thread(target=self.enqueue_values, daemon=True) self.gen_thread.start() diff --git a/monai/data/utils.py b/monai/data/utils.py index 96e3e15d95..5d6869334b 100644 --- a/monai/data/utils.py +++ b/monai/data/utils.py @@ -1073,7 +1073,6 @@ def compute_importance_map( if mode == BlendMode.CONSTANT: importance_map = torch.ones(patch_size, device=device, dtype=torch.float) elif mode == BlendMode.GAUSSIAN: - sigma_scale = ensure_tuple_rep(sigma_scale, len(patch_size)) sigmas = [i * sigma_s for i, sigma_s in zip(patch_size, sigma_scale)] diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py index 468ffdb19a..0829c07981 100644 --- a/monai/engines/evaluator.py +++ b/monai/engines/evaluator.py @@ -298,7 +298,6 @@ def _iteration(self, engine: SupervisedEvaluator, batchdata: dict[str, torch.Ten # execute forward computation with engine.mode(engine.network): - if engine.amp: with torch.cuda.amp.autocast(**engine.amp_kwargs): engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs) diff --git a/monai/losses/ds_loss.py b/monai/losses/ds_loss.py index f6b6bcc14b..da460178ca 100644 --- a/monai/losses/ds_loss.py +++ b/monai/losses/ds_loss.py @@ -71,7 +71,6 @@ def get_loss(self, input: torch.Tensor, target: torch.Tensor): return self.loss(input, target) def forward(self, input: torch.Tensor | list[torch.Tensor], target: torch.Tensor): - if isinstance(input, (list, tuple)): weights = self.get_weights(levels=len(input)) loss = torch.tensor(0, dtype=torch.float, 
device=target.device) diff --git a/monai/losses/quicknatLoss.py b/monai/losses/quicknatLoss.py new file mode 100644 index 0000000000..a0af0cc981 --- /dev/null +++ b/monai/losses/quicknatLoss.py @@ -0,0 +1,179 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Description
+++++++++++++++++++++++
+An additional CombinedLoss module which is not part of the standard MONAI loss library.
+
+Usage
+++++++++++++++++++++++
+Import the module and instantiate any loss class you want to use::
+
+    from monai.losses.quicknatLoss import CombinedLoss
+    loss = CombinedLoss()
+
+Members
+++++++++++++++++++++++
+"""
+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.modules.loss import _Loss, _WeightedLoss
+
+
+class DiceLoss(_WeightedLoss):
+    """
+    Dice loss for a batch of samples.
+    """
+
+    def forward(self, output, target, weights=None, ignore_index=None, binary=False):
+        """
+        Forward pass
+
+        :param output: NxCxHxW logits
+        :param target: NxHxW LongTensor
+        :param weights: C FloatTensor
+        :param ignore_index: int index to ignore from loss
+        :param binary: bool for binarized one-channel (C=1) input
+        :return: torch.tensor
+        """
+        if binary:
+            # softmax over a single channel is identically one, so use sigmoid here
+            output = torch.sigmoid(output)
+            return self._dice_loss_binary(output, target)
+        output = F.softmax(output, dim=1)
+        return self._dice_loss_multichannel(output, target, weights, ignore_index)
+
+    @staticmethod
+    def _dice_loss_binary(output, target):
+        """
+        Dice loss for one-channel binarized input
+
+        :param output: Nx1xHxW probabilities
+        :param target: NxHxW LongTensor
+        :return: torch.tensor
+        """
+        eps = 0.0001
+        if target.dim() == output.dim() - 1:
+            # align an NxHxW target with the Nx1xHxW output before broadcasting
+            target = target.unsqueeze(1)
+
+        intersection = output * target
+        numerator = 2 * intersection.sum(0).sum(1).sum(1)
+        denominator = output + target
+        denominator = denominator.sum(0).sum(1).sum(1) + eps
+        loss_per_channel = 1 - (numerator / denominator)
+
+        return loss_per_channel.sum() / output.size(1)
+
+    @staticmethod
+    def _dice_loss_multichannel(output, target, weights=None, ignore_index=None):
+        """
+        Dice loss for multi-channel input
+
+        :param output: NxCxHxW probabilities
+        :param target: NxHxW LongTensor
+        :param weights: C FloatTensor
+        :param ignore_index: int index to ignore from loss
+        :return: torch.tensor
+        """
+        eps = 0.0001
+        encoded_target = output.detach() * 0
+
+        if ignore_index is not None:
+            mask = target == ignore_index
+            target = target.clone()
+            target[mask] = 0
+            encoded_target.scatter_(1, target.unsqueeze(1), 1)
+            mask = mask.unsqueeze(1).expand_as(encoded_target)
+            encoded_target[mask] = 0
+        else:
+            encoded_target.scatter_(1, target.unsqueeze(1), 1)
+
+        if weights is None:
+            weights = 1
+
+        intersection = output * encoded_target
+        numerator = 2 * intersection.sum(0).sum(1).sum(1)
+        denominator = output + encoded_target
+
+        if ignore_index is not None:
+            denominator[mask] = 0
+        denominator = denominator.sum(0).sum(1).sum(1) + eps
+        loss_per_channel = weights * (1 - (numerator / denominator))
+
+        return loss_per_channel.sum() / output.size(1)
+
+
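The in-place scatter_ call above is the one-hot encoding step of _dice_loss_multichannel. A minimal standalone sketch of that mechanism, using made-up shapes and values:

    import torch

    # stand-ins for NxCxHxW probabilities and an NxHxW index map (illustrative only)
    output = torch.zeros(1, 3, 2, 2)
    target = torch.tensor([[[0, 1], [2, 1]]])

    encoded_target = output.detach() * 0  # zeros with the same shape and device as output
    encoded_target.scatter_(1, target.unsqueeze(1), 1)  # one-hot along the channel axis

    print(encoded_target[0, :, 0, 1])  # tensor([0., 1., 0.]), i.e. class index 1

The ignore_index branch scatters the same way and then zeroes the masked positions in the encoded buffer.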
+class CrossEntropyLoss2d(_WeightedLoss):
+    """
+    Standard PyTorch weighted nn.CrossEntropyLoss
+    """
+
+    def __init__(self, weight_mfb=None):
+        super().__init__()
+        self.nll_loss = nn.CrossEntropyLoss(weight_mfb)
+
+    def forward(self, inputs, targets):
+        """
+        Forward pass
+
+        :param inputs: torch.tensor (NxC)
+        :param targets: torch.tensor (N)
+        :return: scalar
+        """
+        return self.nll_loss(inputs, targets)
+
+
+class CombinedLoss(_Loss):
+    """
+    A combination of dice and cross entropy loss
+    """
+
+    def __init__(self, weight_mfb=None):
+        super().__init__()
+        self.cross_entropy_loss = CrossEntropyLoss2d(weight_mfb)
+        self.dice_loss = DiceLoss()
+        self.softmax = True
+        self.sigmoid = False
+        self.to_onehot_y = True
+        self.other_act = None
+
+    def forward(self, input, target, weight=None):
+        """
+        Forward pass. The loss internally needs an index-map target (NxHxW),
+        but to conform with MONAI conventions it accepts one-hot inputs:
+
+        :param input: torch.tensor (NxCxHxW) logits
+        :param target: torch.tensor (NxCxHxW) one-hot labels
+        :param weight: torch.tensor (NxCxHxW) one-hot pixel weights, optional
+        :return: scalar
+        """
+        # argmax returns a LongTensor on the same device as its input, so no
+        # explicit cast (which would silently move the data to the CPU) is needed
+        target = torch.argmax(target, dim=1)
+
+        if weight is not None:
+            weight = torch.argmax(weight, dim=1)
+
+        # DiceLoss applies softmax internally, so it receives the raw logits
+        y_2 = torch.mean(self.dice_loss(input, target))
+
+        if weight is None:
+            y_1 = torch.mean(self.cross_entropy_loss.forward(input, target))
+
+        else:
+            y_1 = torch.mean(torch.mul(self.cross_entropy_loss.forward(input, target), weight))
+
+        return y_1 + y_2
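A minimal usage sketch for the CombinedLoss defined above; the shapes follow the file's docstrings, and the import path assumes this patch has been applied:

    import torch
    import torch.nn.functional as F

    from monai.losses.quicknatLoss import CombinedLoss

    # NxCxHxW logits for a batch of four two-class 32x32 slices
    logits = torch.randn(4, 2, 32, 32, requires_grad=True)

    # a random index map converted to the one-hot NxCxHxW layout the loss expects
    labels = torch.randint(0, 2, (4, 32, 32))
    one_hot = F.one_hot(labels, num_classes=2).permute(0, 3, 1, 2).float()

    loss_fn = CombinedLoss()
    loss = loss_fn(logits, one_hot)  # scalar: cross entropy term + Dice term
    loss.backward()

Passing an index map directly would not work: forward immediately takes an argmax over the channel axis, so the one-hot layout is the expected input.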
diff --git a/monai/losses/ssim_loss.py b/monai/losses/ssim_loss.py index 0bb6163770..e8e5d0c2ba 100644 --- a/monai/losses/ssim_loss.py +++ b/monai/losses/ssim_loss.py @@ -87,7 +87,6 @@ def forward(self, x: torch.Tensor, y: torch.Tensor, data_range: torch.Tensor) -> data_range, self.win_size, self.k1, self.k2, self.spatial_dims )._compute_tensor(x, y) elif x.shape[0] > 1: - for i in range(x.shape[0]): ssim_val: torch.Tensor = SSIMMetric( data_range, self.win_size, self.k1, self.k2, self.spatial_dims diff --git a/monai/metrics/regression.py b/monai/metrics/regression.py index ecab61308b..405d9ae716 100644 --- a/monai/metrics/regression.py +++ b/monai/metrics/regression.py @@ -278,7 +278,6 @@ def __init__( self.w = torch.ones([1, 1] + [win_size for _ in range(spatial_dims)]) / win_size**spatial_dims def _compute_intermediate_statistics(self, x: torch.Tensor, y: torch.Tensor) -> tuple[torch.Tensor, ...]: - data_range = self.data_range[(None,) * (self.spatial_dims + 2)] # determine whether to work with 2D convolution or 3D conv = getattr(F, f"conv{self.spatial_dims}d") diff --git a/monai/networks/blocks/crf.py b/monai/networks/blocks/crf.py index 5bbad4dff4..398b89882a 100644 --- a/monai/networks/blocks/crf.py +++ b/monai/networks/blocks/crf.py @@ -92,7 +92,6 @@ def forward(self, input_tensor: torch.Tensor, reference_tensor: torch.Tensor): # mean field loop for _ in range(self.iterations): - # message passing step for both kernels bilateral_output = PHLFilter.apply(output_tensor, bilateral_features) gaussian_output = PHLFilter.apply(output_tensor, gaussian_features) diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index dfb39adb34..afd3183581 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -77,7 +77,6 @@ def __init__( dropout: int | None = None, bias: bool = True, ): - self.spatial_dims = spatial_dims self.kernel_size = kernel_size self.num_res_units = num_res_units diff --git a/monai/networks/blocks/dynunet_block.py b/monai/networks/blocks/dynunet_block.py index 6b689cd0ab..12afab3464 100644 --- a/monai/networks/blocks/dynunet_block.py +++ b/monai/networks/blocks/dynunet_block.py @@ -301,7 +301,6 @@ def get_conv_layer( def get_padding(kernel_size: Sequence[int] | int, stride: Sequence[int] | int) -> tuple[int, ...] | int: - kernel_size_np = np.atleast_1d(kernel_size) stride_np = np.atleast_1d(stride) padding_np = (kernel_size_np - stride_np + 1) / 2 diff --git a/monai/networks/blocks/segresnet_block.py b/monai/networks/blocks/segresnet_block.py index 01fc907ab7..3337f50043 100644 --- a/monai/networks/blocks/segresnet_block.py +++ b/monai/networks/blocks/segresnet_block.py @@ -22,7 +22,6 @@ def get_conv_layer( spatial_dims: int, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, bias: bool = False ): - return Convolution( spatial_dims, in_channels, out_channels, strides=stride, kernel_size=kernel_size, bias=bias, conv_only=True ) @@ -78,7 +77,6 @@ def __init__( self.conv2 = get_conv_layer(spatial_dims, in_channels=in_channels, out_channels=in_channels) def forward(self, x): - identity = x x = self.norm1(x) diff --git a/monai/networks/blocks/squeeze_and_excitation.py b/monai/networks/blocks/squeeze_and_excitation.py index 0928cdb641..665e9020ff 100644 --- a/monai/networks/blocks/squeeze_and_excitation.py +++ b/monai/networks/blocks/squeeze_and_excitation.py @@ -247,7 +247,6 @@ def __init__( stride: int = 1, downsample: Convolution | None = None, ) -> None: - conv_param_1 = { "strides": 1, "kernel_size": 1, @@ -298,7 +297,6 @@ def __init__( stride: int = 1, downsample: Convolution | None = None, ) -> None: - conv_param_1 = { "strides": stride, "kernel_size": 1, @@ -348,7 +346,6 @@ def __init__( downsample: Convolution | None = None, base_width: int = 4, ) -> None: - conv_param_1 = { "strides": 1, "kernel_size": 1, diff --git a/monai/networks/layers/filtering.py b/monai/networks/layers/filtering.py index 4ed159c609..b8bfe9a3bc 100644 --- a/monai/networks/layers/filtering.py +++ b/monai/networks/layers/filtering.py @@ -85,7 +85,6 @@ class PHLFilter(torch.autograd.Function): @staticmethod def forward(ctx, input, features, sigmas=None): - scaled_features = features if sigmas is not None: for i in range(features.size(1)): diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index c5398b533e..a1122ceaa2 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -321,7 +321,6 @@ class SavitzkyGolayFilter(nn.Module): """ def __init__(self, window_length: int, order: int, axis: int = 2, mode: str = "zeros"): - super().__init__() if order >= window_length: raise ValueError("order must be less than window_length.") @@ -365,7 +364,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: @staticmethod def _make_coeffs(window_length, order): - half_length, rem = divmod(window_length, 2) if rem == 0: raise ValueError("window_length must be odd.") @@ -391,7 +389,6 @@ class HilbertTransform(nn.Module): """ def __init__(self, axis: int = 2, n: int | None = None) -> None: - super().__init__() self.axis = axis self.n = n diff --git a/monai/networks/nets/ahnet.py b/monai/networks/nets/ahnet.py index 0080180d75..ae248c0cd1 100644 --- a/monai/networks/nets/ahnet.py +++ b/monai/networks/nets/ahnet.py @@ -25,7 +25,6 @@ class Bottleneck3x3x1(nn.Module): - expansion = 4 
def __init__( @@ -36,7 +35,6 @@ def __init__( stride: Sequence[int] | int = 1, downsample: nn.Sequential | None = None, ) -> None: - super().__init__() conv_type = Conv[Conv.CONV, spatial_dims] @@ -275,13 +273,11 @@ def __init__(self, spatial_dims: int, psp_block_num: int, in_ch: int, upsample_m def forward(self, x: torch.Tensor) -> torch.Tensor: outputs = [] if self.upsample_mode == "transpose": - for (project_module, pool_module, up_module) in zip( - self.project_modules, self.pool_modules, self.up_modules - ): + for project_module, pool_module, up_module in zip(self.project_modules, self.pool_modules, self.up_modules): output = up_module(project_module(pool_module(x))) outputs.append(output) else: - for (project_module, pool_module) in zip(self.project_modules, self.pool_modules): + for project_module, pool_module in zip(self.project_modules, self.pool_modules): interpolate_size = x.shape[2:] align_corners: bool | None = None if self.upsample_mode in ["trilinear", "bilinear"]: diff --git a/monai/networks/nets/autoencoder.py b/monai/networks/nets/autoencoder.py index 19906f3ba8..8f093bcc1d 100644 --- a/monai/networks/nets/autoencoder.py +++ b/monai/networks/nets/autoencoder.py @@ -105,7 +105,6 @@ def __init__( dropout: tuple | str | float | None = None, bias: bool = True, ) -> None: - super().__init__() self.dimensions = spatial_dims self.in_channels = in_channels diff --git a/monai/networks/nets/densenet.py b/monai/networks/nets/densenet.py index d822330347..2100272d91 100644 --- a/monai/networks/nets/densenet.py +++ b/monai/networks/nets/densenet.py @@ -181,7 +181,6 @@ def __init__( norm: str | tuple = "batch", dropout_prob: float = 0.0, ) -> None: - super().__init__() conv_type: type[nn.Conv1d | nn.Conv2d | nn.Conv3d] = Conv[Conv.CONV, spatial_dims] diff --git a/monai/networks/nets/dints.py b/monai/networks/nets/dints.py index 0135a544ca..90aac9eaae 100644 --- a/monai/networks/nets/dints.py +++ b/monai/networks/nets/dints.py @@ -568,7 +568,6 @@ def __init__( use_downsample: bool = True, device: str = "cpu", ): - super().__init__() n_feats = tuple([32 * (2**_i) for _i in range(num_depths + 1)]) diff --git a/monai/networks/nets/flexible_unet.py b/monai/networks/nets/flexible_unet.py index 6de629fc6b..fdb3376748 100644 --- a/monai/networks/nets/flexible_unet.py +++ b/monai/networks/nets/flexible_unet.py @@ -123,7 +123,6 @@ def __init__( align_corners: bool | None, is_pad: bool, ): - super().__init__() if len(encoder_channels) < 2: raise ValueError("the length of `encoder_channels` should be no less than 2.") @@ -196,7 +195,6 @@ def __init__( act: tuple | str | None = None, scale_factor: float = 1.0, ): - conv_layer = Conv[Conv.CONV, spatial_dims]( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=kernel_size // 2 ) diff --git a/monai/networks/nets/highresnet.py b/monai/networks/nets/highresnet.py index 7dd99172e1..e71f8d193d 100644 --- a/monai/networks/nets/highresnet.py +++ b/monai/networks/nets/highresnet.py @@ -147,7 +147,6 @@ def __init__( layer_params: Sequence[dict] = DEFAULT_LAYER_PARAMS_3D, channel_matching: ChannelMatching | str = ChannelMatching.PAD, ) -> None: - super().__init__() blocks = nn.ModuleList() @@ -168,7 +167,7 @@ def __init__( ) # residual blocks - for (idx, params) in enumerate(layer_params[1:-2]): # res blocks except the 1st and last two conv layers. + for idx, params in enumerate(layer_params[1:-2]): # res blocks except the 1st and last two conv layers. 
_in_chns, _out_chns = _out_chns, params["n_features"] _dilation = 2**idx for _ in range(params["repeat"]): diff --git a/monai/networks/nets/hovernet.py b/monai/networks/nets/hovernet.py index 4ab106f674..323e107fd7 100644 --- a/monai/networks/nets/hovernet.py +++ b/monai/networks/nets/hovernet.py @@ -92,7 +92,6 @@ def __init__( self.layers.add_module("dropout", dropout_type(dropout_prob)) def forward(self, x: torch.Tensor) -> torch.Tensor: - x1 = self.layers(x) if x1.shape[-1] != x.shape[-1]: trim = (x.shape[-1] - x1.shape[-1]) // 2 @@ -294,7 +293,6 @@ def __init__( self.requires_grad_(False) def forward(self, x: torch.Tensor) -> torch.Tensor: - sc = self.shortcut(x) if self.shortcut.stride == (2, 2): @@ -388,7 +386,6 @@ def __init__( ) def forward(self, xin: torch.Tensor, short_cuts: list[torch.Tensor]) -> torch.Tensor: - block_number = len(short_cuts) - 1 x = xin + short_cuts[block_number] @@ -466,7 +463,6 @@ def __init__( adapt_standard_resnet: bool = False, freeze_encoder: bool = False, ) -> None: - super().__init__() if isinstance(mode, str): @@ -576,7 +572,6 @@ def __init__( _load_pretrained_encoder(self, weights) def forward(self, x: torch.Tensor) -> dict[str, torch.Tensor]: - if self.mode == HoVerNetMode.ORIGINAL.value: if x.shape[-1] != 270 or x.shape[-2] != 270: raise ValueError("Input size should be 270 x 270 when using HoVerNetMode.ORIGINAL") @@ -607,7 +602,6 @@ def forward(self, x: torch.Tensor) -> dict[str, torch.Tensor]: def _load_pretrained_encoder(model: nn.Module, state_dict: OrderedDict | dict): - model_dict = model.state_dict() state_dict = { k: v for k, v in state_dict.items() if (k in model_dict) and (model_dict[k].shape == state_dict[k].shape) @@ -618,7 +612,6 @@ def _load_pretrained_encoder(model: nn.Module, state_dict: OrderedDict | dict): def _remap_preact_resnet_model(model_url: str): - pattern_conv0 = re.compile(r"^(conv0\.\/)(.+)$") pattern_block = re.compile(r"^(d\d+)\.(.+)$") pattern_layer = re.compile(r"^(.+\.d\d+)\.units\.(\d+)(.+)$") @@ -647,7 +640,6 @@ def _remap_preact_resnet_model(model_url: str): def _remap_standard_resnet_model(model_url: str): - pattern_conv0 = re.compile(r"^conv1\.(.+)$") pattern_bn1 = re.compile(r"^bn1\.(.+)$") pattern_block = re.compile(r"^layer(\d+)\.(\d+)\.(.+)$") diff --git a/monai/networks/nets/milmodel.py b/monai/networks/nets/milmodel.py index f6b905bfda..0a25b7feec 100644 --- a/monai/networks/nets/milmodel.py +++ b/monai/networks/nets/milmodel.py @@ -61,7 +61,6 @@ def __init__( trans_blocks: int = 4, trans_dropout: float = 0.0, ) -> None: - super().__init__() if num_classes <= 0: @@ -75,7 +74,6 @@ def __init__( self.transformer: nn.Module | None = None if backbone is None: - net = models.resnet50(pretrained=pretrained) nfc = net.fc.in_features # save the number of final features net.fc = torch.nn.Identity() # remove final linear layer @@ -96,7 +94,6 @@ def hook(module, input, output): net.layer4.register_forward_hook(forward_hook("layer4")) elif isinstance(backbone, str): - # assume torchvision model string is provided torch_model = getattr(models, backbone, None) if torch_model is None: @@ -137,7 +134,6 @@ def hook(module, input, output): self.attention = nn.Sequential(nn.Linear(nfc, 2048), nn.Tanh(), nn.Linear(2048, 1)) elif self.mil_mode == "att_trans_pyramid": - transformer_list = nn.ModuleList( [ nn.TransformerEncoder( @@ -174,7 +170,6 @@ def hook(module, input, output): self.net = net def calc_head(self, x: torch.Tensor) -> torch.Tensor: - sh = x.shape if self.mil_mode == "mean": @@ -186,7 +181,6 @@ def 
calc_head(self, x: torch.Tensor) -> torch.Tensor: x, _ = torch.max(x, dim=1) elif self.mil_mode == "att": - a = self.attention(x) a = torch.softmax(a, dim=1) x = torch.sum(x * a, dim=1) @@ -194,7 +188,6 @@ def calc_head(self, x: torch.Tensor) -> torch.Tensor: x = self.myfc(x) elif self.mil_mode == "att_trans" and self.transformer is not None: - x = x.permute(1, 0, 2) x = self.transformer(x) x = x.permute(1, 0, 2) @@ -206,7 +199,6 @@ def calc_head(self, x: torch.Tensor) -> torch.Tensor: x = self.myfc(x) elif self.mil_mode == "att_trans_pyramid" and self.transformer is not None: - l1 = torch.mean(self.extra_outputs["layer1"], dim=(2, 3)).reshape(sh[0], sh[1], -1).permute(1, 0, 2) l2 = torch.mean(self.extra_outputs["layer2"], dim=(2, 3)).reshape(sh[0], sh[1], -1).permute(1, 0, 2) l3 = torch.mean(self.extra_outputs["layer3"], dim=(2, 3)).reshape(sh[0], sh[1], -1).permute(1, 0, 2) @@ -233,7 +225,6 @@ def calc_head(self, x: torch.Tensor) -> torch.Tensor: return x def forward(self, x: torch.Tensor, no_head: bool = False) -> torch.Tensor: - sh = x.shape x = x.reshape(sh[0] * sh[1], sh[2], sh[3], sh[4]) diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py index c9e34d093c..02869d415f 100644 --- a/monai/networks/nets/resnet.py +++ b/monai/networks/nets/resnet.py @@ -198,7 +198,6 @@ def __init__( feed_forward: bool = True, bias_downsample: bool = True, # for backwards compatibility (also see PR #5477) ) -> None: - super().__init__() if isinstance(block, str): @@ -268,7 +267,6 @@ def _make_layer( shortcut_type: str, stride: int = 1, ) -> nn.Sequential: - conv_type: Callable = Conv[Conv.CONV, spatial_dims] norm_type: Callable = Norm[Norm.BATCH, spatial_dims] diff --git a/monai/networks/nets/segresnet_ds.py b/monai/networks/nets/segresnet_ds.py index 115bec455a..07f3824b51 100644 --- a/monai/networks/nets/segresnet_ds.py +++ b/monai/networks/nets/segresnet_ds.py @@ -152,7 +152,6 @@ def __init__( head_module: nn.Module | None = None, anisotropic_scales: tuple | None = None, ): - super().__init__() if spatial_dims not in (1, 2, 3): @@ -215,7 +214,6 @@ def __init__( self.spatial_dims = spatial_dims def _forward(self, x: torch.Tensor) -> list[torch.Tensor]: - outputs = [] x = self.conv_init(x) @@ -273,7 +271,6 @@ def __init__( upsample_mode: UpsampleMode | str = "deconv", resolution: tuple | None = None, ): - super().__init__() if spatial_dims not in (1, 2, 3): @@ -330,7 +327,6 @@ def __init__( self.up_layers = nn.ModuleList() for i in range(n_up): - filters = filters // 2 kernel_size, _, stride = ( aniso_kernel(anisotropic_scales[len(blocks_up) - i - 1]) if anisotropic_scales else (3, 1, 2) @@ -392,7 +388,6 @@ def is_valid_shape(self, x): return all(a) def _forward(self, x: torch.Tensor) -> torch.Tensor | list[torch.Tensor]: - if self.preprocess is not None: x = self.preprocess(x) diff --git a/monai/networks/nets/senet.py b/monai/networks/nets/senet.py index f8ca5fefef..51435a9ea2 100644 --- a/monai/networks/nets/senet.py +++ b/monai/networks/nets/senet.py @@ -108,7 +108,6 @@ def __init__( input_3x3: bool = True, num_classes: int = 1000, ) -> None: - super().__init__() if isinstance(block, str): @@ -222,7 +221,6 @@ def _make_layer( stride: int = 1, downsample_kernel_size: int = 1, ) -> nn.Sequential: - downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = Convolution( diff --git a/monai/networks/nets/swin_unetr.py b/monai/networks/nets/swin_unetr.py index faaef88514..9f8204968f 100644 --- a/monai/networks/nets/swin_unetr.py +++ 
b/monai/networks/nets/swin_unetr.py @@ -246,7 +246,6 @@ def __init__( self.out = UnetOutBlock(spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels) def load_from(self, weights): - with torch.no_grad(): self.swinViT.patch_embed.proj.weight.copy_(weights["state_dict"]["module.patch_embed.proj.weight"]) self.swinViT.patch_embed.proj.bias.copy_(weights["state_dict"]["module.patch_embed.proj.bias"]) @@ -704,7 +703,6 @@ def __init__(self, dim: int, norm_layer: type[LayerNorm] = nn.LayerNorm, spatial self.norm = norm_layer(4 * dim) def forward(self, x): - x_shape = x.size() if len(x_shape) == 5: b, d, h, w, c = x_shape diff --git a/monai/networks/nets/transchex.py b/monai/networks/nets/transchex.py index 8f7ad33340..31e27ffbf2 100644 --- a/monai/networks/nets/transchex.py +++ b/monai/networks/nets/transchex.py @@ -76,7 +76,6 @@ def from_pretrained( with tarfile.open(resolved_archive_file, "r:gz") as archive: def is_within_directory(directory, target): - abs_directory = os.path.abspath(directory) abs_target = os.path.abspath(target) @@ -85,7 +84,6 @@ def is_within_directory(directory, target): return prefix == abs_directory def safe_extract(tar, path=".", members=None, *, numeric_owner=False): - for member in tar.getmembers(): member_path = os.path.join(path, member.name) if not is_within_directory(path, member_path): diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py index 8de12ae6db..a48aabf915 100644 --- a/monai/networks/nets/unet.py +++ b/monai/networks/nets/unet.py @@ -125,7 +125,6 @@ def __init__( bias: bool = True, adn_ordering: str = "NDA", ) -> None: - super().__init__() if len(channels) < 2: @@ -215,7 +214,6 @@ def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_ """ mod: nn.Module if self.num_res_units > 0: - mod = ResidualUnit( self.dimensions, in_channels, diff --git a/monai/networks/nets/varautoencoder.py b/monai/networks/nets/varautoencoder.py index 29131281de..6cb8d6e40b 100644 --- a/monai/networks/nets/varautoencoder.py +++ b/monai/networks/nets/varautoencoder.py @@ -91,7 +91,6 @@ def __init__( bias: bool = True, use_sigmoid: bool = True, ) -> None: - self.in_channels, *self.in_shape = in_shape self.use_sigmoid = use_sigmoid diff --git a/monai/transforms/adaptors.py b/monai/transforms/adaptors.py index a2f061d13d..5729740690 100644 --- a/monai/transforms/adaptors.py +++ b/monai/transforms/adaptors.py @@ -148,7 +148,6 @@ def map_only_names(ditems, input_map): return {v: ditems[k] for k, v in input_map.items()} def _inner(ditems): - sig = FunctionSignature(function) if sig.found_kwargs: @@ -218,7 +217,6 @@ def _inner(ditems): @_monai_export("monai.transforms") def apply_alias(fn, name_map): def _inner(data): - # map names pre_call = dict(data) for _from, _to in name_map.items(): diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py index 774acf1e31..1980ef508b 100644 --- a/monai/transforms/intensity/array.py +++ b/monai/transforms/intensity/array.py @@ -1069,7 +1069,6 @@ class SavitzkyGolaySmooth(Transform): backend = [TransformBackends.TORCH] def __init__(self, window_length: int, order: int, axis: int = 1, mode: str = "zeros"): - if axis < 0: raise ValueError("axis must be zero or positive.") @@ -1114,7 +1113,6 @@ class DetectEnvelope(Transform): backend = [TransformBackends.TORCH] def __init__(self, axis: int = 1, n: int | None = None) -> None: - if axis < 0: raise ValueError("axis must be zero or positive.") @@ -1506,7 +1504,6 @@ class GibbsNoise(Transform, Fourier): 
backend = [TransformBackends.TORCH, TransformBackends.NUMPY] def __init__(self, alpha: float = 0.1) -> None: - if alpha > 1 or alpha < 0: raise ValueError("alpha must take values in the interval [0, 1].") self.alpha = alpha @@ -1656,7 +1653,6 @@ class KSpaceSpikeNoise(Transform, Fourier): backend = [TransformBackends.TORCH, TransformBackends.NUMPY] def __init__(self, loc: tuple | Sequence[tuple], k_intensity: Sequence[float] | float | None = None): - self.loc = ensure_tuple(loc) self.k_intensity = k_intensity @@ -1790,7 +1786,6 @@ def __init__( intensity_range: Sequence[Sequence[float] | float] | None = None, channel_wise: bool = True, ): - self.intensity_range = intensity_range self.channel_wise = channel_wise self.sampled_k_intensity: list = [] @@ -2152,7 +2147,6 @@ class IntensityRemap(RandomizableTransform): """ def __init__(self, kernel_size: int = 30, slope: float = 0.7): - super().__init__() self.kernel_size = kernel_size @@ -2207,7 +2201,6 @@ class RandIntensityRemap(RandomizableTransform): """ def __init__(self, prob: float = 0.1, kernel_size: int = 30, slope: float = 0.7, channel_wise: bool = True): - RandomizableTransform.__init__(self, prob=prob) self.kernel_size = kernel_size self.slope = slope diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index a0e33557b9..790cb38671 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -1316,7 +1316,6 @@ def __init__( alpha: Sequence[float] = (0.0, 1.0), allow_missing_keys: bool = False, ) -> None: - MapTransform.__init__(self, keys, allow_missing_keys) RandomizableTransform.__init__(self, prob=prob) self.rand_gibbs_noise = RandGibbsNoise(alpha=alpha, prob=1.0) @@ -1363,12 +1362,10 @@ class GibbsNoised(MapTransform): backend = GibbsNoise.backend def __init__(self, keys: KeysCollection, alpha: float = 0.5, allow_missing_keys: bool = False) -> None: - MapTransform.__init__(self, keys, allow_missing_keys) self.transform = GibbsNoise(alpha) def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, NdarrayOrTensor]: - d = dict(data) for key in self.key_iterator(d): d[key] = self.transform(d[key]) @@ -1425,7 +1422,6 @@ def __init__( k_intensity: Sequence[float] | float | None = None, allow_missing_keys: bool = False, ) -> None: - super().__init__(keys, allow_missing_keys) self.transform = KSpaceSpikeNoise(loc, k_intensity) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 2bee350eff..3cb23a541d 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -1965,7 +1965,6 @@ def __call__(self, spatial_size: Sequence[int]) -> torch.Tensor: class Resample(Transform): - backend = [TransformBackends.TORCH, TransformBackends.NUMPY] def __init__( @@ -2867,7 +2866,6 @@ def __call__( class GridDistortion(Transform): - backend = [TransformBackends.TORCH] def __init__( @@ -2965,7 +2963,6 @@ def __call__( class RandGridDistortion(RandomizableTransform): - backend = [TransformBackends.TORCH] def __init__( diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index ff1565c23e..a378c5cc4f 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -205,7 +205,7 @@ def __init__( def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]: d: dict = dict(data) - for (key, mode, padding_mode, align_corners, dtype, dst_key) in self.key_iterator( + for key, mode, 
padding_mode, align_corners, dtype, dst_key in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners, self.dtype, self.dst_keys ): d[key] = self.sp_transform( @@ -278,7 +278,7 @@ def __init__( def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torch.Tensor]: d = dict(data) - for (key, mode, padding_mode, align_corners, dtype) in self.key_iterator( + for key, mode, padding_mode, align_corners, dtype in self.key_iterator( d, self.mode, self.padding_mode, self.align_corners, self.dtype ): d[key] = self.resampler( diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py index 9175c37740..d1e07f7b88 100644 --- a/monai/transforms/utility/array.py +++ b/monai/transforms/utility/array.py @@ -1018,7 +1018,6 @@ def __call__( class ClassesToIndices(Transform): - backend = [TransformBackends.NUMPY, TransformBackends.TORCH] def __init__( @@ -1574,7 +1573,6 @@ class ImageFilter(Transform): ) def __init__(self, filter: str | NdarrayOrTensor | nn.Module, filter_size: int | None = None, **kwargs) -> None: - self._check_filter_format(filter, filter_size) self._check_kwargs_are_present(filter, **kwargs) self.filter = filter diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py index 2e0bde21b4..2e2d647ef3 100644 --- a/monai/transforms/utility/dictionary.py +++ b/monai/transforms/utility/dictionary.py @@ -386,7 +386,6 @@ def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> dict[Hashable, N class SplitDimd(MapTransform): - backend = SplitDim.backend def __init__( diff --git a/monai/transforms/utils_create_transform_ims.py b/monai/transforms/utils_create_transform_ims.py index f040ea18aa..a98cdfe936 100644 --- a/monai/transforms/utils_create_transform_ims.py +++ b/monai/transforms/utils_create_transform_ims.py @@ -466,7 +466,6 @@ def create_transform_im( if __name__ == "__main__": - keys = [CommonKeys.IMAGE, CommonKeys.LABEL] data = get_data(keys) create_transform_im(RandFlip, dict(prob=1, spatial_axis=1), data) diff --git a/monai/utils/profiling.py b/monai/utils/profiling.py index 059f600347..da5c0ac05c 100644 --- a/monai/utils/profiling.py +++ b/monai/utils/profiling.py @@ -57,7 +57,6 @@ def torch_profiler_full(func): @wraps(func) def wrapper(*args, **kwargs): - with torch.autograd.profiler.profile(use_cuda=True) as prof: result = func(*args, **kwargs) @@ -77,7 +76,6 @@ def torch_profiler_time_cpu_gpu(func): @wraps(func) def wrapper(*args, **kwargs): - with torch.autograd.profiler.profile(use_cuda=True) as prof: result = func(*args, **kwargs) @@ -103,7 +101,6 @@ def torch_profiler_time_end_to_end(func): @wraps(func) def wrapper(*args, **kwargs): - torch.cuda.synchronize() start = perf_counter() diff --git a/monai/visualize/utils.py b/monai/visualize/utils.py index b74bebe4ba..f6718fe7a5 100644 --- a/monai/visualize/utils.py +++ b/monai/visualize/utils.py @@ -199,7 +199,6 @@ def blend_images( raise ValueError("image and label should have matching spatial sizes.") if isinstance(alpha, (np.ndarray, torch.Tensor)): if image.shape[1:] != alpha.shape[1:]: # pytype: disable=attribute-error,invalid-directive - raise ValueError("if alpha is image, size should match input image and label.") # rescale arrays to [0, 1] if desired diff --git a/monai/visualize/visualizer.py b/monai/visualize/visualizer.py index b267dc8f16..e7f5d9bbbe 100644 --- a/monai/visualize/visualizer.py +++ b/monai/visualize/visualizer.py @@ -29,7 +29,6 @@ def default_upsampler(spatial_size: Sized, align_corners: bool = False) -> 
Calla """ def up(x): - linear_mode = [InterpolateMode.LINEAR, InterpolateMode.BILINEAR, InterpolateMode.TRILINEAR] interp_mode = linear_mode[len(spatial_size) - 1] return F.interpolate(x, size=spatial_size, mode=str(interp_mode.value), align_corners=align_corners) diff --git a/tests/min_tests.py b/tests/min_tests.py index 5bf8179629..0eddc34c3f 100644 --- a/tests/min_tests.py +++ b/tests/min_tests.py @@ -210,7 +210,6 @@ def run_testsuit(): if __name__ == "__main__": - # testing import submodules from monai.utils.module import load_submodules diff --git a/tests/runner.py b/tests/runner.py index 96a1d4a5c4..7a7cc9f28f 100644 --- a/tests/runner.py +++ b/tests/runner.py @@ -114,7 +114,6 @@ def get_default_pattern(loader): if __name__ == "__main__": - # Parse input arguments args = parse_args() diff --git a/tests/test_anchor_box.py b/tests/test_anchor_box.py index 3d53d79155..c29296e8ae 100644 --- a/tests/test_anchor_box.py +++ b/tests/test_anchor_box.py @@ -44,7 +44,6 @@ class TestAnchorGenerator(unittest.TestCase): @parameterized.expand(TEST_CASES_2D) def test_anchor_2d(self, input_param, image_shape, feature_maps_shapes): - torch_anchor_utils, _ = optional_import("torchvision.models.detection.anchor_utils") image_list, _ = optional_import("torchvision.models.detection.image_list") diff --git a/tests/test_auto3dseg.py b/tests/test_auto3dseg.py index 383d55b8a3..6152b265db 100644 --- a/tests/test_auto3dseg.py +++ b/tests/test_auto3dseg.py @@ -150,7 +150,6 @@ class TestImageAnalyzer(Analyzer): """ def __init__(self, image_key="image", stats_name="test_image"): - self.image_key = image_key report_format = {"test_stats": None} @@ -176,7 +175,6 @@ def setUp(self): @parameterized.expand(SIM_CPU_TEST_CASES) def test_data_analyzer_cpu(self, input_params): - sim_dim = input_params["sim_dim"] label_key = input_params["label_key"] image_only = not bool(label_key) diff --git a/tests/test_auto3dseg_hpo.py b/tests/test_auto3dseg_hpo.py index 0b65adbd70..bc276a7f22 100644 --- a/tests/test_auto3dseg_hpo.py +++ b/tests/test_auto3dseg_hpo.py @@ -130,7 +130,6 @@ def setUp(self) -> None: @skip_if_no_cuda def test_run_algo(self) -> None: - algo_dict = self.history[0] algo_name = list(algo_dict.keys())[0] algo = algo_dict[algo_name] diff --git a/tests/test_bilateral_approx_cpu.py b/tests/test_bilateral_approx_cpu.py index 04dd39b227..da30d5d7de 100644 --- a/tests/test_bilateral_approx_cpu.py +++ b/tests/test_bilateral_approx_cpu.py @@ -367,7 +367,6 @@ class BilateralFilterTestCaseCpuApprox(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu_approx(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") fast_approx = True @@ -381,7 +380,6 @@ def test_cpu_approx(self, test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cpu_approx_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") fast_approx = True diff --git a/tests/test_bilateral_approx_cuda.py b/tests/test_bilateral_approx_cuda.py index 6ae3133719..924ff3253e 100644 --- a/tests/test_bilateral_approx_cuda.py +++ b/tests/test_bilateral_approx_cuda.py @@ -368,7 +368,6 @@ class BilateralFilterTestCaseCudaApprox(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cuda_approx(self, test_case_description, sigmas, input, expected): - # Skip this test if not torch.cuda.is_available(): return @@ -386,7 +385,6 @@ def test_cuda_approx(self, 
test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cpu_approx_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cuda") fast_approx = True diff --git a/tests/test_bilateral_precise.py b/tests/test_bilateral_precise.py index 1d2a5918d8..7fc7e06726 100644 --- a/tests/test_bilateral_precise.py +++ b/tests/test_bilateral_precise.py @@ -367,7 +367,6 @@ class BilateralFilterTestCaseCpuPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") fast_approx = False @@ -381,7 +380,6 @@ def test_cpu_precise(self, test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") fast_approx = False @@ -402,7 +400,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expec class BilateralFilterTestCaseCudaPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cuda_precise(self, test_case_description, sigmas, input, expected): - # Skip this test if not torch.cuda.is_available(): return @@ -420,7 +417,6 @@ def test_cuda_precise(self, test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cuda_precise_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cuda") fast_approx = False diff --git a/tests/test_cast_to_type.py b/tests/test_cast_to_type.py index 070549edc8..6dd994120c 100644 --- a/tests/test_cast_to_type.py +++ b/tests/test_cast_to_type.py @@ -39,7 +39,6 @@ class TestCastToType(unittest.TestCase): @parameterized.expand(TESTS) def test_type(self, out_dtype, input_data, expected_type): - result = CastToType(dtype=out_dtype)(input_data) self.assertEqual(result.dtype, get_equivalent_dtype(expected_type, type(result))) diff --git a/tests/test_compute_generalized_dice.py b/tests/test_compute_generalized_dice.py index 2d38e5e0b1..961feb0561 100644 --- a/tests/test_compute_generalized_dice.py +++ b/tests/test_compute_generalized_dice.py @@ -132,7 +132,6 @@ def test_nans(self, input_data, expected_value): # Samplewise tests @parameterized.expand([TEST_CASE_1, TEST_CASE_2]) def test_value_class(self, input_data, expected_value): - # same test as for compute_meandice vals = {} vals["y_pred"] = input_data.pop("y_pred") @@ -145,7 +144,6 @@ def test_value_class(self, input_data, expected_value): # Aggregation tests @parameterized.expand([TEST_CASE_4, TEST_CASE_5]) def test_nans_class(self, params, input_data, expected_value): - generalized_dice_score = GeneralizedDiceScore(**params) generalized_dice_score(**input_data) result = generalized_dice_score.aggregate() diff --git a/tests/test_compute_meandice.py b/tests/test_compute_meandice.py index 4b74e31847..8e765c6192 100644 --- a/tests/test_compute_meandice.py +++ b/tests/test_compute_meandice.py @@ -200,7 +200,6 @@ def test_nans(self, input_data, expected_value): # DiceMetric class tests @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_10]) def test_value_class(self, input_data, expected_value): - # same test as for compute_meandice vals = {} vals["y_pred"] = input_data.pop("y_pred") @@ -212,7 +211,6 @@ def 
test_value_class(self, input_data, expected_value): @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8]) def test_nans_class(self, params, input_data, expected_value): - dice_metric = DiceMetric(**params) dice_metric(**input_data) result, _ = dice_metric.aggregate() diff --git a/tests/test_compute_meaniou.py b/tests/test_compute_meaniou.py index 68f87493a2..2ec0472b12 100644 --- a/tests/test_compute_meaniou.py +++ b/tests/test_compute_meaniou.py @@ -200,7 +200,6 @@ def test_nans(self, input_data, expected_value): # MeanIoU class tests @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_10]) def test_value_class(self, input_data, expected_value): - # same test as for compute_meaniou vals = {} vals["y_pred"] = input_data.pop("y_pred") @@ -212,7 +211,6 @@ def test_value_class(self, input_data, expected_value): @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8]) def test_nans_class(self, params, input_data, expected_value): - iou_metric = MeanIoU(**params) iou_metric(**input_data) result, _ = iou_metric.aggregate() diff --git a/tests/test_compute_regression_metrics.py b/tests/test_compute_regression_metrics.py index 5cddce7d62..b0fde3afe9 100644 --- a/tests/test_compute_regression_metrics.py +++ b/tests/test_compute_regression_metrics.py @@ -61,7 +61,6 @@ def test_shape_reduction(self): for batch in batch_dims: for spatial in spatial_dims: for base in base_dims: - # create random tensors in_tensor = torch.rand((batch,) + (base,) * (spatial - 1)).to(device) @@ -104,7 +103,6 @@ def test_compare_numpy(self): for batch in batch_dims: for spatial in spatial_dims: for base in base_dims: - # create random tensors in_tensor_a = torch.rand((batch,) + (base,) * (spatial - 1)).to(device) in_tensor_b = torch.rand((batch,) + (base,) * (spatial - 1)).to(device) @@ -154,7 +152,6 @@ def test_same_input(self): for batch in batch_dims: for spatial in spatial_dims: for base in base_dims: - # create random tensors in_tensor = torch.rand((batch,) + (base,) * (spatial - 1)).to(device) @@ -180,7 +177,6 @@ def test_diff_input(self): for batch in batch_dims: for spatial in spatial_dims: for base in base_dims: - # create random tensors in_tensor_a = torch.zeros((batch,) + (base,) * (spatial - 1)).to(device) in_tensor_b = torch.ones((batch,) + (base,) * (spatial - 1)).to(device) diff --git a/tests/test_crf_cpu.py b/tests/test_crf_cpu.py index 5f749119e7..e29a4d69eb 100644 --- a/tests/test_crf_cpu.py +++ b/tests/test_crf_cpu.py @@ -497,7 +497,6 @@ class CRFTestCaseCpu(unittest.TestCase): @parameterized.expand(TEST_CASES) def test(self, test_case_description, params, input, features, expected): - # Create input tensors input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cpu")) feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cpu")) diff --git a/tests/test_crf_cuda.py b/tests/test_crf_cuda.py index 0c004b2825..8529e2e6de 100644 --- a/tests/test_crf_cuda.py +++ b/tests/test_crf_cuda.py @@ -498,7 +498,6 @@ class CRFTestCaseCuda(unittest.TestCase): @parameterized.expand(TEST_CASES) def test(self, test_case_description, params, input, features, expected): - # Create input tensors input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cuda")) feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cuda")) diff --git a/tests/test_crop_foregroundd.py b/tests/test_crop_foregroundd.py index 
13158ca6c2..c23c4df339 100644 --- a/tests/test_crop_foregroundd.py +++ b/tests/test_crop_foregroundd.py @@ -21,7 +21,6 @@ TEST_POSITION, TESTS = [], [] for p in TEST_NDARRAYS_ALL: - TEST_POSITION.append( [ { diff --git a/tests/test_cumulative_average.py b/tests/test_cumulative_average.py index 2373b0511c..d815d9be77 100644 --- a/tests/test_cumulative_average.py +++ b/tests/test_cumulative_average.py @@ -34,7 +34,6 @@ class TestAverageMeter(unittest.TestCase): @parameterized.expand(TEST_CASE_1) def test_value_all(self, data): - # test orig self.run_test(data) diff --git a/tests/test_cumulative_average_dist.py b/tests/test_cumulative_average_dist.py index 9f45955544..17f4164838 100644 --- a/tests/test_cumulative_average_dist.py +++ b/tests/test_cumulative_average_dist.py @@ -25,7 +25,6 @@ class DistributedCumulativeAverage(DistTestCase): @DistCall(nnodes=1, nproc_per_node=2) def test_value(self): - rank = dist.get_rank() nprocs = dist.get_world_size() is_cuda = dist.get_backend() == dist.Backend.NCCL diff --git a/tests/test_dataset_summary.py b/tests/test_dataset_summary.py index 746c3d79cf..b1cc578f32 100644 --- a/tests/test_dataset_summary.py +++ b/tests/test_dataset_summary.py @@ -39,7 +39,6 @@ class TestDatasetSummary(unittest.TestCase): def test_spacing_intensity(self): set_determinism(seed=0) with tempfile.TemporaryDirectory() as tempdir: - for i in range(5): im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0) n = nib.Nifti1Image(im, np.eye(4)) @@ -75,7 +74,6 @@ def test_spacing_intensity(self): def test_anisotropic_spacing(self): with tempfile.TemporaryDirectory() as tempdir: - pixdims = [[1.0, 1.0, 5.0], [1.0, 1.0, 4.0], [1.0, 1.0, 4.5], [1.0, 1.0, 2.0], [1.0, 1.0, 1.0]] for i in range(5): im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0) diff --git a/tests/test_efficientnet.py b/tests/test_efficientnet.py index e67defa4a3..5bdad5a568 100644 --- a/tests/test_efficientnet.py +++ b/tests/test_efficientnet.py @@ -321,7 +321,6 @@ def test_drop_connect_layer(self): # testing 1D, 2D and 3D shape for rand_tensor_shape in [(512, 16, 4), (384, 16, 4, 4), (256, 16, 4, 4, 4)]: - # test validation mode, out tensor == in tensor training = False for p in p_list: diff --git a/tests/test_ensure_tuple.py b/tests/test_ensure_tuple.py index 3d0ef40dd3..dc6649ec4c 100644 --- a/tests/test_ensure_tuple.py +++ b/tests/test_ensure_tuple.py @@ -50,5 +50,4 @@ def test_value(self, input, expected_value, wrap_array=False): if __name__ == "__main__": - unittest.main() diff --git a/tests/test_fg_bg_to_indicesd.py b/tests/test_fg_bg_to_indicesd.py index 5d827b84d3..d0d1ae5fb6 100644 --- a/tests/test_fg_bg_to_indicesd.py +++ b/tests/test_fg_bg_to_indicesd.py @@ -20,7 +20,6 @@ TEST_CASES = [] for p in TEST_NDARRAYS: - TEST_CASES.append( [ {"keys": "label", "image_key": None, "image_threshold": 0.0, "output_shape": None}, diff --git a/tests/test_flexible_unet.py b/tests/test_flexible_unet.py index 4c66c903aa..aae0cf729a 100644 --- a/tests/test_flexible_unet.py +++ b/tests/test_flexible_unet.py @@ -47,12 +47,10 @@ def get_encoder_parameters(cls): @classmethod def num_channels_per_output(cls): - return [(32, 64, 128, 256, 512, 1024), (32, 64, 128, 256), (32, 64, 128, 256), (32, 64, 128, 256)] @classmethod def num_outputs(cls): - return [6, 4, 4, 4] @classmethod diff --git a/tests/test_generate_instance_contour.py b/tests/test_generate_instance_contour.py index 07a9f8525c..9058855e62 100644 --- 
a/tests/test_generate_instance_contour.py +++ b/tests/test_generate_instance_contour.py @@ -48,7 +48,6 @@ class TestGenerateInstanceContour(unittest.TestCase): @parameterized.expand(TEST_CASE) def test_shape(self, in_type, test_data, min_num_points, offset, expected): - inst_bbox = get_bbox(test_data[None]) inst_map = test_data[inst_bbox[0][0] : inst_bbox[0][1], inst_bbox[0][2] : inst_bbox[0][3]] result = GenerateInstanceContour(min_num_points=min_num_points)(in_type(inst_map[None]), offset=offset) diff --git a/tests/test_gmm.py b/tests/test_gmm.py index aede44a123..4ed3b956ff 100644 --- a/tests/test_gmm.py +++ b/tests/test_gmm.py @@ -275,7 +275,6 @@ def tearDown(self) -> None: @parameterized.expand(TEST_CASES) @skip_if_no_cuda def test_cuda(self, test_case_description, mixture_count, class_count, features, labels, expected): - # Device to run on device = torch.device("cuda") diff --git a/tests/test_handler_classification_saver.py b/tests/test_handler_classification_saver.py index a885cce7f7..905e326a66 100644 --- a/tests/test_handler_classification_saver.py +++ b/tests/test_handler_classification_saver.py @@ -28,7 +28,6 @@ class TestHandlerClassificationSaver(unittest.TestCase): def test_saved_content(self): with tempfile.TemporaryDirectory() as tempdir: - # set up engine def _train_func(engine, batch): engine.state.batch = decollate_batch(batch) diff --git a/tests/test_handler_logfile.py b/tests/test_handler_logfile.py index 9e8006644d..f09876ab0a 100644 --- a/tests/test_handler_logfile.py +++ b/tests/test_handler_logfile.py @@ -60,7 +60,6 @@ def test_filename(self): filename = "something_else.txt" with tempfile.TemporaryDirectory() as tempdir: - handler = LogfileHandler(output_dir=tempdir, filename=filename) handler.attach(self.engine) diff --git a/tests/test_handler_mlflow.py b/tests/test_handler_mlflow.py index c88caae0a9..99180860a7 100644 --- a/tests/test_handler_mlflow.py +++ b/tests/test_handler_mlflow.py @@ -60,7 +60,6 @@ def tearDown(self): def test_metrics_track(self): experiment_param = {"backbone": "efficientnet_b0"} with tempfile.TemporaryDirectory() as tempdir: - # set up engine def _train_func(engine, batch): return [batch + 1.0] diff --git a/tests/test_handler_prob_map_producer.py b/tests/test_handler_prob_map_producer.py index 4fc44d612e..153a00b1ac 100644 --- a/tests/test_handler_prob_map_producer.py +++ b/tests/test_handler_prob_map_producer.py @@ -51,7 +51,6 @@ def __init__(self, name, size): ] def __getitem__(self, index): - image = np.ones((3, 2, 2)) * index metadata = { ProbMapKeys.COUNT.value: self.data[index][ProbMapKeys.COUNT.value], diff --git a/tests/test_handler_regression_metrics.py b/tests/test_handler_regression_metrics.py index 101862ae66..a06452c54d 100644 --- a/tests/test_handler_regression_metrics.py +++ b/tests/test_handler_regression_metrics.py @@ -66,7 +66,6 @@ def test_compute(self): # iterate over all variations and check shapes for different reduction functions for mt_fn, mt_fn_np in zip(metrics, metrics_np): - for batch in batch_dims: for spatial in spatial_dims: for base in base_dims: diff --git a/tests/test_handler_tb_image.py b/tests/test_handler_tb_image.py index 031dfe707f..8657e552f1 100644 --- a/tests/test_handler_tb_image.py +++ b/tests/test_handler_tb_image.py @@ -34,7 +34,6 @@ class TestHandlerTBImage(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_tb_image_shape(self, shape): with tempfile.TemporaryDirectory() as tempdir: - # set up engine def _train_func(engine, batch): engine.state.batch = decollate_batch(list(batch)) 
diff --git a/tests/test_handler_tb_stats.py b/tests/test_handler_tb_stats.py index 4e7f599afa..b135dee86e 100644 --- a/tests/test_handler_tb_stats.py +++ b/tests/test_handler_tb_stats.py @@ -27,7 +27,6 @@ class TestHandlerTBStats(unittest.TestCase): def test_metrics_print(self): with tempfile.TemporaryDirectory() as tempdir: - # set up engine def _train_func(engine, batch): return [batch + 1.0] @@ -50,7 +49,6 @@ def _update_metric(engine): def test_metrics_writer(self): with tempfile.TemporaryDirectory() as tempdir: - # set up engine def _train_func(engine, batch): return [batch + 1.0] diff --git a/tests/test_hilbert_transform.py b/tests/test_hilbert_transform.py index a34bdddc93..4c49aecd8b 100644 --- a/tests/test_hilbert_transform.py +++ b/tests/test_hilbert_transform.py @@ -23,7 +23,6 @@ def create_expected_numpy_output(input_datum, **kwargs): - x = np.fft.fft(input_datum.cpu().numpy() if input_datum.device.type == "cuda" else input_datum.numpy(), **kwargs) f = np.fft.fftfreq(x.shape[kwargs["axis"]]) u = np.heaviside(f, 0.5) diff --git a/tests/test_integration_classification_2d.py b/tests/test_integration_classification_2d.py index 16914073cb..da883724a0 100644 --- a/tests/test_integration_classification_2d.py +++ b/tests/test_integration_classification_2d.py @@ -58,7 +58,6 @@ def __getitem__(self, index): def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", num_workers=10): - monai.config.print_config() # define transforms for image and classification train_transforms = Compose( diff --git a/tests/test_inverse.py b/tests/test_inverse.py index c081d38dfe..0423c80d6b 100644 --- a/tests/test_inverse.py +++ b/tests/test_inverse.py @@ -72,7 +72,6 @@ from tests.utils import make_nifti_image, make_rand_affine if TYPE_CHECKING: - has_nib = True else: _, has_nib = optional_import("nibabel") @@ -454,7 +453,6 @@ def test_inverse(self, _, data_name, acceptable_diff, is_meta, *transforms): # skip this test if multiprocessing uses 'spawn', as the check is only basic anyway @skipUnless(torch.multiprocessing.get_start_method() == "spawn", "requires spawn") def test_fail(self): - t1 = SpatialPadd("image", [10, 5]) data = t1(self.all_data["2D"]) @@ -465,7 +463,6 @@ def test_fail(self): @parameterized.expand(N_SAMPLES_TESTS) def test_inverse_inferred_seg(self, extra_transform): - test_data = [] for _ in range(20): image, label = create_test_image_2d(100, 101) diff --git a/tests/test_inverse_collation.py b/tests/test_inverse_collation.py index aa8266710d..05e296e6b9 100644 --- a/tests/test_inverse_collation.py +++ b/tests/test_inverse_collation.py @@ -46,7 +46,6 @@ from tests.utils import make_nifti_image if TYPE_CHECKING: - has_nib = True else: _, has_nib = optional_import("nibabel") diff --git a/tests/test_k_space_spike_noise.py b/tests/test_k_space_spike_noise.py index cc0a4932af..4d820573a6 100644 --- a/tests/test_k_space_spike_noise.py +++ b/tests/test_k_space_spike_noise.py @@ -47,7 +47,6 @@ def get_data(im_shape, im_type): @parameterized.expand(TESTS) def test_same_result(self, im_shape, im_type, k_intensity): - im = self.get_data(im_shape, im_type) loc = [0, int(im.shape[1] / 2), 0] if len(im_shape) == 2 else [0, int(im.shape[1] / 2), 0, 0] t = KSpaceSpikeNoise(loc, k_intensity) @@ -63,7 +62,6 @@ def test_same_result(self, im_shape, im_type, k_intensity): @parameterized.expand(TESTS) def test_highlighted_kspace_pixel(self, im_shape, as_tensor_input, k_intensity): - im = self.get_data(im_shape, as_tensor_input) loc = [0, int(im.shape[1] / 2), 0] if len(im_shape) == 2 
else [0, int(im.shape[1] / 2), 0, 0] t = KSpaceSpikeNoise(loc, k_intensity) diff --git a/tests/test_k_space_spike_noised.py b/tests/test_k_space_spike_noised.py index 8cd42d7c08..76a79d4b12 100644 --- a/tests/test_k_space_spike_noised.py +++ b/tests/test_k_space_spike_noised.py @@ -49,7 +49,6 @@ def get_data(im_shape, im_type): @parameterized.expand(TESTS) def test_same_result(self, im_shape, im_type): - data = self.get_data(im_shape, im_type) loc = [0] + [int(im_shape[i] / 2) for i in range(len(im_shape))] k_intensity = 10 @@ -66,7 +65,6 @@ def test_same_result(self, im_shape, im_type): @parameterized.expand(TESTS) def test_highlighted_kspace_pixel(self, im_shape, im_type): - data = self.get_data(im_shape, im_type) loc = [0] + [int(im_shape[i] / 2) for i in range(len(im_shape))] k_intensity = 10 diff --git a/tests/test_lr_finder.py b/tests/test_lr_finder.py index 4472a75716..c10016eeff 100644 --- a/tests/test_lr_finder.py +++ b/tests/test_lr_finder.py @@ -49,7 +49,6 @@ @unittest.skipUnless(has_pil, "requires PIL") class TestLRFinder(unittest.TestCase): def setUp(self): - self.root_dir = MONAIEnvVars.data_dir() if not self.root_dir: self.root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data") diff --git a/tests/test_masked_inference_wsi_dataset.py b/tests/test_masked_inference_wsi_dataset.py index e9cd6724ab..bb90f7900b 100644 --- a/tests/test_masked_inference_wsi_dataset.py +++ b/tests/test_masked_inference_wsi_dataset.py @@ -40,7 +40,6 @@ def prepare_data(*masks): - mask = np.zeros((HEIGHT // 2, WIDTH // 2)) mask[100, 100] = 1 np.save(masks[0], mask) diff --git a/tests/test_mlp.py b/tests/test_mlp.py index a9010b2944..8ad66ebc6e 100644 --- a/tests/test_mlp.py +++ b/tests/test_mlp.py @@ -24,7 +24,6 @@ for dropout_rate in np.linspace(0, 1, 4): for hidden_size in [128, 256, 512, 768]: for mlp_dim in [0, 1028, 2048, 3072]: - test_case = [ {"hidden_size": hidden_size, "mlp_dim": mlp_dim, "dropout_rate": dropout_rate}, (2, 512, hidden_size), diff --git a/tests/test_network_consistency.py b/tests/test_network_consistency.py index 2ca8c5a4b0..948e4d0615 100644 --- a/tests/test_network_consistency.py +++ b/tests/test_network_consistency.py @@ -49,7 +49,6 @@ def tearDown(self): ) @parameterized.expand(TESTS, skip_on_empty=True) def test_network_consistency(self, net_name, data_path, json_path): - print("Net name: " + net_name) print("Data path: " + data_path) print("JSON path: " + json_path) diff --git a/tests/test_nifti_endianness.py b/tests/test_nifti_endianness.py index 0e00e6077d..2539d95fd5 100644 --- a/tests/test_nifti_endianness.py +++ b/tests/test_nifti_endianness.py @@ -53,7 +53,6 @@ def setUp(self): @parameterized.expand(TESTS) @skipUnless(has_nib, "Requires NiBabel") def test_endianness(self, endianness, use_array, image_only): - hdr = nib.Nifti1Header(endianness=endianness) nii = nib.Nifti1Image(self.im, np.eye(4), header=hdr) nib.save(nii, self.fname) diff --git a/tests/test_pad_collation.py b/tests/test_pad_collation.py index 0af9d49b39..cd98f29abf 100644 --- a/tests/test_pad_collation.py +++ b/tests/test_pad_collation.py @@ -87,7 +87,6 @@ def tearDown(self) -> None: @parameterized.expand(TESTS) def test_pad_collation(self, t_type, collate_method, transform): - if t_type == dict: dataset = CacheDataset(self.dict_data, transform, progress=False) else: diff --git a/tests/test_phl_cpu.py b/tests/test_phl_cpu.py index a558ba2827..98a5018d8e 100644 --- a/tests/test_phl_cpu.py +++ b/tests/test_phl_cpu.py @@ -244,7 +244,6 @@ class 
PHLFilterTestCaseCpu(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu(self, test_case_description, sigmas, input, features, expected): - # Create input tensors input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cpu")) feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cpu")) diff --git a/tests/test_phl_cuda.py b/tests/test_phl_cuda.py index 97c3db6c70..0ddfd5eaae 100644 --- a/tests/test_phl_cuda.py +++ b/tests/test_phl_cuda.py @@ -152,7 +152,6 @@ class PHLFilterTestCaseCuda(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cuda(self, test_case_description, sigmas, input, features, expected): - # Create input tensors input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cuda")) feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cuda")) diff --git a/tests/test_rand_k_space_spike_noised.py b/tests/test_rand_k_space_spike_noised.py index 37e8ebcf81..3e1c11b2d9 100644 --- a/tests/test_rand_k_space_spike_noised.py +++ b/tests/test_rand_k_space_spike_noised.py @@ -46,7 +46,6 @@ def get_data(im_shape, im_type): @parameterized.expand(TESTS) def test_same_result(self, im_shape, im_type): - data = self.get_data(im_shape, im_type) t = RandKSpaceSpikeNoised(KEYS, prob=1.0, intensity_range=(13, 15), channel_wise=True) diff --git a/tests/test_segresnet_ds.py b/tests/test_segresnet_ds.py index e8382f7079..a5b88f9724 100644 --- a/tests/test_segresnet_ds.py +++ b/tests/test_segresnet_ds.py @@ -81,7 +81,6 @@ def test_shape(self, input_param, input_shape, expected_shape): @parameterized.expand(TEST_CASE_SEGRESNET_DS2) def test_shape2(self, input_param, input_shape, expected_shape): - dsdepth = input_param.get("dsdepth", 1) net = SegResNetDS(**input_param).to(device) @@ -107,7 +106,6 @@ def test_shape2(self, input_param, input_shape, expected_shape): @parameterized.expand(TEST_CASE_SEGRESNET_DS3) def test_shape3(self, input_param, input_shape, expected_shapes): - dsdepth = input_param.get("dsdepth", 1) net = SegResNetDS(**input_param).to(device) diff --git a/tests/test_selfattention.py b/tests/test_selfattention.py index 08eac70e51..926ef7da55 100644 --- a/tests/test_selfattention.py +++ b/tests/test_selfattention.py @@ -28,7 +28,6 @@ for dropout_rate in np.linspace(0, 1, 4): for hidden_size in [360, 480, 600, 768]: for num_heads in [4, 6, 8, 12]: - test_case = [ {"hidden_size": hidden_size, "num_heads": num_heads, "dropout_rate": dropout_rate}, (2, 512, hidden_size), diff --git a/tests/test_squeezedim.py b/tests/test_squeezedim.py index 9f08af540f..6673fd25c1 100644 --- a/tests/test_squeezedim.py +++ b/tests/test_squeezedim.py @@ -34,7 +34,6 @@ class TestSqueezeDim(unittest.TestCase): @parameterized.expand(TESTS) def test_shape(self, input_param, test_data, expected_shape): - result = SqueezeDim(**input_param)(test_data) self.assertTupleEqual(result.shape, expected_shape) if "dim" in input_param and input_param["dim"] == 2 and isinstance(result, MetaTensor): @@ -42,7 +41,6 @@ def test_shape(self, input_param, test_data, expected_shape): @parameterized.expand(TESTS_FAIL) def test_invalid_inputs(self, exception, input_param, test_data): - with self.assertRaises(exception): SqueezeDim(**input_param)(test_data) diff --git a/tests/test_state_cacher.py b/tests/test_state_cacher.py index 6cb404b976..2037dc3951 100644 --- a/tests/test_state_cacher.py +++ b/tests/test_state_cacher.py @@ -38,7 +38,6 @@ class 
TestStateCacher(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_state_cacher(self, data_obj, params): - key = "data_obj" state_cacher = StateCacher(**params) diff --git a/tests/test_tile_on_grid.py b/tests/test_tile_on_grid.py index 714b65f98c..06b5fe65f0 100644 --- a/tests/test_tile_on_grid.py +++ b/tests/test_tile_on_grid.py @@ -77,7 +77,6 @@ def make_image( seed=123, **kwargs, ): - tile_count = int(np.sqrt(tile_count)) pad = 0 if random_offset: @@ -117,7 +116,6 @@ def make_image( class TestTileOnGrid(unittest.TestCase): @parameterized.expand(TESTS) def test_tile_patch_single_call(self, in_type, input_parameters): - img, tiles = make_image(**input_parameters) input_img = in_type(img) @@ -127,7 +125,6 @@ def test_tile_patch_single_call(self, in_type, input_parameters): @parameterized.expand(TESTS2) def test_tile_patch_random_call(self, in_type, input_parameters): - img, tiles = make_image(**input_parameters, seed=123) input_img = in_type(img) diff --git a/tests/test_tile_on_grid_dict.py b/tests/test_tile_on_grid_dict.py index cb824ee2e6..bb8689fd3b 100644 --- a/tests/test_tile_on_grid_dict.py +++ b/tests/test_tile_on_grid_dict.py @@ -86,7 +86,6 @@ def make_image( seed=123, **kwargs, ): - tile_count = int(np.sqrt(tile_count)) pad = 0 if random_offset: @@ -126,7 +125,6 @@ def make_image( class TestTileOnGridDict(unittest.TestCase): @parameterized.expand(TESTS) def test_tile_patch_single_call(self, in_type, input_parameters): - key = "image" input_parameters["keys"] = key @@ -149,7 +147,6 @@ def test_tile_patch_single_call(self, in_type, input_parameters): @parameterized.expand(TESTS2) def test_tile_patch_random_call(self, in_type, input_parameters): - key = "image" input_parameters["keys"] = key diff --git a/tests/test_trainable_bilateral.py b/tests/test_trainable_bilateral.py index 1300e5068d..43b628be80 100644 --- a/tests/test_trainable_bilateral.py +++ b/tests/test_trainable_bilateral.py @@ -275,7 +275,6 @@ class BilateralFilterTestCaseCpuPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") @@ -302,7 +301,6 @@ def test_cpu_precise(self, test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cpu") @@ -375,7 +373,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, expec class BilateralFilterTestCaseCudaPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cuda_precise(self, test_case_description, sigmas, input, expected): - # Skip this test if not torch.cuda.is_available(): return @@ -406,7 +403,6 @@ def test_cuda_precise(self, test_case_description, sigmas, input, expected): @parameterized.expand(TEST_CASES) def test_cuda_precise_backwards(self, test_case_description, sigmas, input, expected): - # Params to determine the implementation to test device = torch.device("cuda") diff --git a/tests/test_trainable_joint_bilateral.py b/tests/test_trainable_joint_bilateral.py index a8725dbf17..a42510b7c6 100644 --- a/tests/test_trainable_joint_bilateral.py +++ b/tests/test_trainable_joint_bilateral.py @@ -359,7 +359,6 @@ class JointBilateralFilterTestCaseCpuPrecise(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_cpu_precise(self, test_case_description, sigmas, input, guide, 
expected):
-
         # Params to determine the implementation to test
         device = torch.device("cpu")
@@ -389,7 +388,6 @@ def test_cpu_precise(self, test_case_description, sigmas, input, guide, expected
 
     @parameterized.expand(TEST_CASES)
     def test_cpu_precise_backwards(self, test_case_description, sigmas, input, guide, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cpu")
@@ -484,7 +482,6 @@ def test_cpu_precise_backwards(self, test_case_description, sigmas, input, guide
 class JointBilateralFilterTestCaseCudaPrecise(unittest.TestCase):
     @parameterized.expand(TEST_CASES)
     def test_cuda_precise(self, test_case_description, sigmas, input, guide, expected):
-
         # Skip this test
         if not torch.cuda.is_available():
             return
@@ -518,7 +515,6 @@ def test_cuda_precise(self, test_case_description, sigmas, input, guide, expecte
 
     @parameterized.expand(TEST_CASES)
     def test_cuda_precise_backwards(self, test_case_description, sigmas, input, guide, expected):
-
         # Params to determine the implementation to test
         device = torch.device("cuda")
diff --git a/tests/test_transformerblock.py b/tests/test_transformerblock.py
index 2650367886..f1a20b842c 100644
--- a/tests/test_transformerblock.py
+++ b/tests/test_transformerblock.py
@@ -25,7 +25,6 @@
 for hidden_size in [360, 480, 600, 768]:
     for num_heads in [4, 8, 12]:
         for mlp_dim in [1024, 3072]:
-
             test_case = [
                 {
                     "hidden_size": hidden_size,
diff --git a/tests/testing_data/integration_answers.py b/tests/testing_data/integration_answers.py
index 8bf6b1e368..f4a5483f83 100644
--- a/tests/testing_data/integration_answers.py
+++ b/tests/testing_data/integration_answers.py
@@ -644,7 +644,7 @@
 
 
 def test_integration_value(test_name, key, data, rtol=1e-2):
-    for (idx, expected) in enumerate(EXPECTED_ANSWERS):
+    for idx, expected in enumerate(EXPECTED_ANSWERS):
         if test_name not in expected:
             continue
         if key not in expected[test_name]:
diff --git a/tests/utils.py b/tests/utils.py
index 280b848806..2f4b6d81ac 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -575,7 +575,6 @@ def run_process(func, args, kwargs, results):
             results.put(e)
 
     def __call__(self, obj):
-
         if self.skip_timing:
             return obj
 

From f03baaeacd71d3a28aa1c958ed516f50f2210d1b Mon Sep 17 00:00:00 2001
From: Carlotta
Date: Sun, 5 Feb 2023 12:36:22 +0100
Subject: [PATCH 03/68] initial commit, reformatted original monai files

---
 monai/losses/quicknatLoss.py | 179 -----------------------------------
 1 file changed, 179 deletions(-)
 delete mode 100644 monai/losses/quicknatLoss.py

diff --git a/monai/losses/quicknatLoss.py b/monai/losses/quicknatLoss.py
deleted file mode 100644
index a0af0cc981..0000000000
--- a/monai/losses/quicknatLoss.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright (c) MONAI Consortium
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Description
-++++++++++++++++++++++
-Addition CombinedLosses module which is not part of standard monai loss library.
-
-Usage
-++++++++++++++++++++++
-Import the package and Instantiate any loss class you want to you::
-
-    from nn_common_modules import losses as additional_losses
-    loss = additional_losses.CombinedLoss()
-
-Members
-++++++++++++++++++++++
-"""
-from __future__ import annotations
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.modules.loss import _Loss, _WeightedLoss
-
-
-class DiceLoss(_WeightedLoss):
-    """
-    Dice Loss for a batch of samples
-    """
-
-    def forward(self, output, target, weights=None, ignore_index=None, binary=False):
-        """
-        Forward pass
-
-        :param output: NxCxHxW logits
-        :param target: NxHxW LongTensor
-        :param weights: C FloatTensor
-        :param ignore_index: int index to ignore from loss
-        :param binary: bool for binarized one chaneel(C=1) input
-        :return: torch.tensor
-        """
-        output = F.softmax(output, dim=1)
-        if binary:
-            return self._dice_loss_binary(output, target)
-        return self._dice_loss_multichannel(output, target, weights, ignore_index)
-
-    @staticmethod
-    def _dice_loss_binary(output, target):
-        """
-        Dice loss for one channel binarized input
-
-        :param output: Nx1xHxW logits
-        :param target: NxHxW LongTensor
-        :return:
-        """
-        eps = 0.0001
-
-        intersection = output * target
-        numerator = 2 * intersection.sum(0).sum(1).sum(1)
-        denominator = output + target
-        denominator = denominator.sum(0).sum(1).sum(1) + eps
-        loss_per_channel = 1 - (numerator / denominator)
-
-        return loss_per_channel.sum() / output.size(1)
-
-    @staticmethod
-    def _dice_loss_multichannel(output, target, weights=None, ignore_index=None):
-        """
-        Forward pass
-
-        :param output: NxCxHxW Variable
-        :param target: NxHxW LongTensor
-        :param weights: C FloatTensor
-        :param ignore_index: int index to ignore from loss
-        :param binary: bool for binarized one chaneel(C=1) input
-        :return:
-        """
-        eps = 0.0001
-        encoded_target = output.detach() * 0
-
-        if ignore_index is not None:
-            mask = target == ignore_index
-            target = target.clone()
-            target[mask] = 0
-            encoded_target.scatter_(1, target.unsqueeze(1), 1)
-            mask = mask.unsqueeze(1).expand_as(encoded_target)
-            encoded_target[mask] = 0
-        else:
-            encoded_target.scatter_(1, target.unsqueeze(1), 1)
-
-        if weights is None:
-            weights = 1
-
-        intersection = output * encoded_target
-        numerator = 2 * intersection.sum(0).sum(1).sum(1)
-        denominator = output + encoded_target
-
-        if ignore_index is not None:
-            denominator[mask] = 0
-        denominator = denominator.sum(0).sum(1).sum(1) + eps
-        loss_per_channel = weights * (1 - (numerator / denominator))
-
-        return loss_per_channel.sum() / output.size(1)
-
-
-class CrossEntropyLoss2d(_WeightedLoss):
-    """
-    Standard pytorch weighted nn.CrossEntropyLoss
-    """
-
-    def __init__(self, weight_mfb=None):
-        super(CrossEntropyLoss2d, self).__init__()
-        self.nll_loss = nn.CrossEntropyLoss(weight_mfb)
-
-    def forward(self, inputs, targets):
-        """
-        Forward pass
-
-        :param inputs: torch.tensor (NxC)
-        :param targets: torch.tensor (N)
-        :return: scalar
-        """
-        return self.nll_loss(inputs, targets)
-
-
-class CombinedLoss(_Loss):
-    """
-    A combination of dice and cross entropy loss
-    """
-
-    def __init__(self, weight_mfb=None):
-        super(CombinedLoss, self).__init__()
-        self.cross_entropy_loss = CrossEntropyLoss2d(weight_mfb)
-        self.dice_loss = DiceLoss()
-        self.softmax = True
-        self.sigmoid = False
-        self.to_onehot_y = True
-        self.other_act = None
-
-    def forward(self, input, target, weight=None):
-        """
-        Forward pass
-        needs following forms
-        input: torch.tensor (NxCxHxW)
-        target: torch.tensor (NxHxW)
-        weight: torch.tensor (NxHxW)
-        to comform with monai standards accept
-        :params:input: torch.tensor (NxCxHxW)
-        :params:target: torch.tensor (NxCxHxW)
-        :params:weight: torch.tensor (NxCxHxW)
-        :return: scalar
-        """
-        # transform of target and weight
-        target = target.type(torch.LongTensor)
-        target = torch.argmax(target, dim=1)
-
-        if weight is not None:
-            weight = weight.type(torch.LongTensor)
-            weight = torch.argmax(weight, dim=1)
-
-        input_soft = F.softmax(input, dim=1)
-
-        y_2 = torch.mean(self.dice_loss(input_soft, target))
-
-        if weight is None:
-            y_1 = torch.mean(self.cross_entropy_loss.forward(input, target))
-
-        else:
-            y_1 = torch.mean(torch.mul(self.cross_entropy_loss.forward(input, target), weight))
-
-        return y_1 + y_2

From 916b58b31eb3a7e95690139cf1a9878d58570af9 Mon Sep 17 00:00:00 2001
From: Alexandra Marquardt
Date: Mon, 6 Feb 2023 12:59:30 +0100
Subject: [PATCH 04/68] blocks and network structures for daf3d

---
 monai/networks/blocks/aspp.py               | 35 +++++++
 monai/networks/blocks/backbone_fpn_utils.py | 15 ++-
 .../blocks/feature_pyramid_network.py       | 44 +++++++++
 monai/networks/nets/daf3d.py                | 99 +++++++++++++++++++
 monai/networks/nets/resnet.py               | 73 ++++++++++++++
 5 files changed, 265 insertions(+), 1 deletion(-)
 create mode 100644 monai/networks/nets/daf3d.py

diff --git a/monai/networks/blocks/aspp.py b/monai/networks/blocks/aspp.py
index 1f6c76c3af..7852248b9d 100644
--- a/monai/networks/blocks/aspp.py
+++ b/monai/networks/blocks/aspp.py
@@ -17,6 +17,7 @@
 import torch.nn as nn
 
 from monai.networks.blocks.convolutions import Convolution
+from monai.networks.blocks import ADN
 from monai.networks.layers import same_padding
 from monai.networks.layers.factories import Conv
@@ -105,3 +106,37 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         x_out = torch.cat([conv(x) for conv in self.convs], dim=1)
         x_out = self.conv_k1(x_out)
         return x_out
+
+
+class DAF3D_ASPP(SimpleASPP):
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        conv_out_channels: int,
+        kernel_sizes: Sequence[int] = (1, 3, 3, 3),
+        dilations: Sequence[int] = (1, 2, 4, 6),
+        norm_type: tuple | str | None = "BATCH",
+        bias: bool = False,
+    ) -> None:
+        super().__init__(spatial_dims, in_channels, conv_out_channels, kernel_sizes, dilations, norm_type, bias=bias)
+
+        #change convolutions in self.convs so they fit our needs
+        new_convs = nn.ModuleList()
+        for _conv in self.convs:
+            tmp_conv = Convolution(1,1,1)
+            tmp_conv.conv = _conv
+            tmp_conv.adn = ADN(ordering="N", norm=norm_type, norm_dim=1)
+            tmp_conv = self._init_weight(tmp_conv)
+            new_convs.append(tmp_conv)
+        self.convs = new_convs
+
+        #change final convolution
+        self.conv_k1 = Convolution(spatial_dims=3, in_channels=4*in_channels, out_channels=conv_out_channels, kernel_size=1, adn_ordering="N", norm=norm_type)
+
+    def _init_weight(self, conv):
+        for m in conv.modules():
+            if isinstance(m, nn.Conv3d): #true for conv.conv
+                torch.nn.init.kaiming_normal_(m.weight)
+        return conv
+
diff --git a/monai/networks/blocks/backbone_fpn_utils.py b/monai/networks/blocks/backbone_fpn_utils.py
index 824b31a83b..03742fde7f 100644
--- a/monai/networks/blocks/backbone_fpn_utils.py
+++ b/monai/networks/blocks/backbone_fpn_utils.py
@@ -57,7 +57,7 @@
 from monai.networks.nets import resnet
 from monai.utils import optional_import
 
-from .feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool
+from .feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool, DAF3D_FPN
 
 torchvision_models, _ = optional_import("torchvision.models")
@@ -173,3 +173,16 @@ def _resnet_fpn_extractor(
     return BackboneWithFPN(
         backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, spatial_dims=spatial_dims
     )
+
+class DAF3D_BackboneWithFPN(BackboneWithFPN):
+    def __init__(
+        self,
+        backbone: nn.Module,
+        return_layers: dict[str, str],
+        in_channels_list: list[int],
+        out_channels: int,
+        spatial_dims: int | None = None,
+        extra_blocks: ExtraFPNBlock | None = None) -> None:
+
+        super().__init__(backbone, return_layers, in_channels_list, out_channels, spatial_dims, extra_blocks)
+        self.fpn = DAF3D_FPN(spatial_dims, in_channels_list, out_channels)
diff --git a/monai/networks/blocks/feature_pyramid_network.py b/monai/networks/blocks/feature_pyramid_network.py
index cca7342078..bd05814c3b 100644
--- a/monai/networks/blocks/feature_pyramid_network.py
+++ b/monai/networks/blocks/feature_pyramid_network.py
@@ -59,6 +59,7 @@
 from torch import Tensor, nn
 
 from monai.networks.layers.factories import Conv, Pool
+from monai.networks.blocks.convolutions import Convolution
 
 __all__ = ["ExtraFPNBlock", "LastLevelMaxPool", "LastLevelP6P7", "FeaturePyramidNetwork"]
@@ -262,3 +263,46 @@ def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]:
         out = OrderedDict(list(zip(names, results)))
 
         return out
+
+class DAF3D_FPN(FeaturePyramidNetwork):
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels_list: list[int],
+        out_channels: int,
+        extra_blocks: ExtraFPNBlock | None = None,):
+
+        super().__init__(spatial_dims, in_channels_list, out_channels, extra_blocks)
+
+        self.inner_blocks = nn.ModuleList()
+        for in_channels in in_channels_list:
+            if in_channels == 0:
+                raise ValueError("in_channels=0 is currently not supported")
+            inner_block_module = Convolution(spatial_dims, in_channels, out_channels, kernel_size=1, adn_ordering="NA", act="PRELU", norm=("group", {"num_groups": 32, "num_channels" : 128}))
+            self.inner_blocks.append(inner_block_module)
+
+    def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]:
+
+        # unpack OrderedDict into two lists for easier handling
+        names = list(x.keys())
+        x_values: list[Tensor] = list(x.values())
+
+        last_inner = self.get_result_from_inner_blocks(x_values[-1], -1)
+        results = []
+        results.append(last_inner)
+
+        for idx in range(len(x_values) - 2, -1, -1):
+            inner_lateral = self.get_result_from_inner_blocks(x_values[idx], idx)
+            feat_shape = inner_lateral.shape[2:]
+            inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="trilinear")
+            last_inner = inner_lateral + inner_top_down
+            results.insert(0, last_inner)
+
+        if self.extra_blocks is not None:
+            results, names = self.extra_blocks(results, x_values, names)
+
+        results = [results[0]] + [F.interpolate(l, size=x["feat1"].size()[2:], mode="trilinear") for l in results[1:]]
+        # make it back an OrderedDict
+        out = OrderedDict(list(zip(names, results)))
+
+        return out
\ No newline at end of file
diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py
new file mode 100644
index 0000000000..4eb2a342e3
--- /dev/null
+++ b/monai/networks/nets/daf3d.py
@@ -0,0 +1,99 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from monai.networks.blocks.convolutions import Convolution
+from monai.networks.blocks.backbone_fpn_utils import DAF3D_BackboneWithFPN
+from monai.networks.blocks.aspp import DAF3D_ASPP
+from monai.networks.nets.resnet import DAF3D_ResNet, DAF3D_ResNetBottleneck
+
+
+class AttentionModule(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+        group_norm = ("group", {"num_groups": 32, "num_channels" : 64})
+
+        self.attentive_map = nn.Sequential(
+            Convolution(spatial_dims=3, in_channels=192, out_channels=64, kernel_size=1, norm=group_norm, act="PRELU"),
+            Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU"),
+            Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, adn_ordering="A", act="SIGMOID")
+        )
+        self.refine = nn.Sequential(
+            Convolution(spatial_dims=3, in_channels=192, out_channels=64, kernel_size=1, norm=group_norm, act="PRELU"),
+            Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU"),
+            Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU"),
+        )
+
+    def forward(self, slf, mlf):
+        att = self.attentive_map(torch.cat((slf, mlf), 1))
+        out = self.refine(torch.cat((slf, att * mlf), 1))
+        return out
+
+class DAF3D_Backbone(nn.Module):
+    def __init__(self):
+        super().__init__()
+        net = DAF3D_ResNet(DAF3D_ResNetBottleneck, [3,4,6,3], [128,256,512,1024])
+        net = list(net.children())
+        self.layer0 = nn.Sequential(*net[:3])
+        # the layer0 contains the first convolution, bn and relu
+        self.layer1 = nn.Sequential(*net[3:5])
+        # the layer1 contains the first pooling and the first 3 bottle blocks
+        self.layer2 = net[5]
+        # the layer2 contains the second 4 bottle blocks
+        self.layer3 = net[6]
+        # the layer3 contains the media bottle blocks
+        # with 6 in 50-layers and 23 in 101-layers
+        self.layer4 = net[7]
+        # the layer4 contains the final 3 bottle blocks
+        # according the backbone the next is avg-pooling and dense with num classes uints
+        # but we don't use the final two layers in backbone networks
+
+    def forward(self, x):
+        layer0 = self.layer0(x)
+        layer1 = self.layer1(layer0)
+        layer2 = self.layer2(layer1)
+        layer3 = self.layer3(layer2)
+        layer4 = self.layer4(layer3)
+        return layer4
+
+
+class DAF3D_Monai(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.backbone_with_fpn = DAF3D_BackboneWithFPN(backbone=DAF3D_Backbone(), return_layers={"layer1" : "feat1", "layer2" : "feat2", "layer3" : "feat3", "layer4" : "feat4"}, in_channels_list=[256, 512, 1024, 2048], out_channels=128, spatial_dims=3)
+        self.predict1 = nn.Conv3d(128, 1, kernel_size=1)
+
+        group_norm = ("group", {"num_groups": 32, "num_channels" : 64})
+        act_prelu = ("prelu", {"num_parameters": 1, "init" : 0.25})
+        self.fuse = nn.Sequential(
+            Convolution(spatial_dims=3, in_channels=512, out_channels=64, kernel_size=1, adn_ordering="NA", norm=group_norm, act=act_prelu),
+            Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, adn_ordering="NA", padding=1, norm=group_norm, act=act_prelu),
+            Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, adn_ordering="NA", padding=1, norm=group_norm, act=act_prelu)
+        )
+        self.attention = AttentionModule()
+
+        self.refine = Convolution(3, 256, 64, kernel_size=1, adn_ordering="NA", norm=group_norm, act=act_prelu)
+        self.predict2 = nn.Conv3d(64, 1, kernel_size=1)
+        self.aspp = DAF3D_ASPP(3, 64, 64, kernel_sizes=(3,3,3,3), dilations=((1,1,1), (1,6,6), (1,12,12), (1,18,18)), norm_type=group_norm, bias=True)
+
+
+    def forward(self, x):
+        #layers from 1 - 4
+        single_layer_features = [l for l in self.backbone_with_fpn(x).values()]
+        #first 4 supervised signals (slfs 1 - 4)
+        supervised1 = [self.predict1(slf) for slf in single_layer_features]
+        mlf = self.fuse(torch.cat(single_layer_features, 1))
+        attentive_feature_maps = [self.attention(slf, mlf) for slf in single_layer_features]
+        #second 4 supervised signals (af 1 - 4)
+        supervised2 = [self.predict2(af) for af in attentive_feature_maps]
+        attentive_mlf = self.refine(torch.cat(attentive_feature_maps, 1))
+        aspp = self.aspp(attentive_mlf)
+        supervised_final = self.predict2(aspp)
+
+        if self.training:
+            output = supervised1 + supervised2 + [supervised_final]
+            output = [F.interpolate(o, size=x.size()[2:], mode='trilinear') for o in output]
+        else:
+            output = F.interpolate(supervised_final, size=x.size()[2:], mode='trilinear')
+        return output
\ No newline at end of file
diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py
index 02869d415f..ba742c645e 100644
--- a/monai/networks/nets/resnet.py
+++ b/monai/networks/nets/resnet.py
@@ -427,3 +427,76 @@ def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
         progress (bool): If True, displays a progress bar of the download to stderr
     """
     return _resnet("resnet200", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs)
+
+
+class DAF3D_ResNetBottleneck(ResNetBottleneck):
+    expansion = 2
+
+    def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None):
+
+        norm_type: Callable = Norm[Norm.GROUP, spatial_dims]
+        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
+
+        #in case downsample uses batch norm, change to group norm
+        if isinstance(downsample, nn.Sequential):
+            downsample = nn.Sequential(
+                conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False),
+                norm_type(num_groups=32, num_channels=planes*self.expansion)
+            )
+
+        super().__init__(in_planes, planes, 3, stride, downsample)
+
+        #change norm from batch to group norm
+        self.bn1 = norm_type(num_groups=32, num_channels=planes)
+        self.bn2 = norm_type(num_groups=32, num_channels=planes)
+        self.bn3 = norm_type(num_groups=32, num_channels=planes * self.expansion)
+
+        #adapt one convolution which is implemented differently
+        self.conv2 = conv_type(
+            planes,
+            planes,
+            kernel_size=3,
+            padding=1,
+            stride=stride,
+            groups=32,
+            bias=False)
+
+        #adapt activation function
+        self.relu = nn.PReLU()
+
+class DAF3D_ResNetDilatedBottleneck(DAF3D_ResNetBottleneck):
+    def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None):
+        super().__init__(in_planes, planes, spatial_dims, stride, downsample)
+
+        #add dilation in second convolution
+        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
+        self.conv2 = conv_type(
+            planes,
+            planes,
+            kernel_size=3,
+            stride=stride,
+            padding=2,
+            dilation=2,
+            groups=32,
+            bias=False
+        )
+
+class DAF3D_ResNet(ResNet):
+    def __init__(self, block, layers, block_inplanes, shortcut_type='B'):
+        super().__init__(block, layers, block_inplanes, n_input_channels=1, num_classes=2, shortcut_type=shortcut_type)
+
+        self.in_planes = 64
+
+        #adapt first convolution
+        conv_type : Callable = Conv[Conv.CONV, 3]
+        norm_type: Callable = Norm[Norm.GROUP, 3]
+
+        self.conv1 = conv_type(1, self.in_planes, kernel_size=7, stride=(1,2,2), padding=(3,3,3), bias=False)
+        self.bn1 = norm_type(32, 64)
+        self.relu = nn.PReLU()
+
+        #adapt layers to our needs
+        self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], 3, shortcut_type)
+        self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], 3, shortcut_type, stride=(1, 2, 2))
+        self.layer3 = self._make_layer(DAF3D_ResNetDilatedBottleneck, block_inplanes[2], layers[2], 3, shortcut_type, stride=1)
+        self.layer4 = self._make_layer(DAF3D_ResNetDilatedBottleneck, block_inplanes[3], layers[3], 3, shortcut_type, stride=1)
\ No newline at end of file

From 6717c12fd3ef78f9ff190a031f2ae68a5916b1e4 Mon Sep 17 00:00:00 2001
From: Alexandra Marquardt
Date: Wed, 8 Feb 2023 16:18:36 +0100
Subject: [PATCH 05/68] basic code reformatting

---
 monai/networks/blocks/aspp.py               |  30 ++--
 monai/networks/blocks/backbone_fpn_utils.py |  34 +++--
 .../blocks/feature_pyramid_network.py       |  24 ++-
 monai/networks/nets/daf3d.py                | 138 +++++++++++++-----
 monai/networks/nets/resnet.py               |  65 ++++-----
 5 files changed, 189 insertions(+), 102 deletions(-)

diff --git a/monai/networks/blocks/aspp.py b/monai/networks/blocks/aspp.py
index 7852248b9d..97b3b8fdbd 100644
--- a/monai/networks/blocks/aspp.py
+++ b/monai/networks/blocks/aspp.py
@@ -16,8 +16,8 @@
 import torch
 import torch.nn as nn
 
-from monai.networks.blocks.convolutions import Convolution
 from monai.networks.blocks import ADN
+from monai.networks.blocks.convolutions import Convolution
 from monai.networks.layers import same_padding
 from monai.networks.layers.factories import Conv
@@ -108,7 +108,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return x_out
 
 
-class DAF3D_ASPP(SimpleASPP):
+class Daf3dASPP(SimpleASPP):
     def __init__(
         self,
         spatial_dims: int,
         in_channels: int,
         conv_out_channels: int,
         kernel_sizes: Sequence[int] = (1, 3, 3, 3),
         dilations: Sequence[int] = (1, 2, 4, 6),
         norm_type: tuple | str | None = "BATCH",
         bias: bool = False,
     ) -> None:
         super().__init__(spatial_dims, in_channels, conv_out_channels, kernel_sizes, dilations, norm_type, bias=bias)
-
-        #change convolutions in self.convs so they fit our needs
+
+        # change convolutions in self.convs so they fit our needs
         new_convs = nn.ModuleList()
         for _conv in self.convs:
-            tmp_conv = Convolution(1,1,1)
-            tmp_conv.conv = _conv
+            tmp_conv = Convolution(1, 1, 1)
+            tmp_conv.conv = _conv
             tmp_conv.adn = ADN(ordering="N", norm=norm_type, norm_dim=1)
             tmp_conv = self._init_weight(tmp_conv)
             new_convs.append(tmp_conv)
         self.convs = new_convs
 
-        #change final convolution
-        self.conv_k1 = Convolution(spatial_dims=3, in_channels=4*in_channels, out_channels=conv_out_channels, kernel_size=1, adn_ordering="N", norm=norm_type)
+        # change final convolution
+        self.conv_k1 = Convolution(
+            spatial_dims=3,
+            in_channels=4 * in_channels,
+            out_channels=conv_out_channels,
+            kernel_size=1,
+            adn_ordering="N",
+            norm=norm_type,
+        )
 
     def _init_weight(self, conv):
         for m in conv.modules():
-            if isinstance(m, nn.Conv3d): #true for conv.conv
-                torch.nn.init.kaiming_normal_(m.weight)
+            if isinstance(m, nn.Conv3d):  # true for conv.conv
+                torch.nn.init.kaiming_normal_(m.weight)
         return conv
-
diff --git a/monai/networks/blocks/backbone_fpn_utils.py b/monai/networks/blocks/backbone_fpn_utils.py
index 03742fde7f..9f4864e5ab 100644
--- a/monai/networks/blocks/backbone_fpn_utils.py
+++ b/monai/networks/blocks/backbone_fpn_utils.py
@@ -57,7 +57,7 @@
 from monai.networks.nets import resnet
 from monai.utils import optional_import
 
-from .feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool, DAF3D_FPN
+from .feature_pyramid_network import Daf3dFPN, ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool
 
 torchvision_models, _ = optional_import("torchvision.models")
@@ -174,15 +174,27 @@ def _resnet_fpn_extractor(
         backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, spatial_dims=spatial_dims
     )
+
+class Daf3dBackboneWithFPN(BackboneWithFPN):
+    def __init__(
+        self,
+        backbone: nn.Module,
+        return_layers: dict[str, str],
in_channels_list: list[int], - out_channels: int, - spatial_dims: int | None = None, - extra_blocks: ExtraFPNBlock | None = None) -> None: - + self, + backbone: nn.Module, + return_layers: dict[str, str], + in_channels_list: list[int], + out_channels: int, + spatial_dims: int | None = None, + extra_blocks: ExtraFPNBlock | None = None, + ) -> None: super().__init__(backbone, return_layers, in_channels_list, out_channels, spatial_dims, extra_blocks) - self.fpn = DAF3D_FPN(spatial_dims, in_channels_list, out_channels) + + if spatial_dims is None: + if hasattr(backbone, "spatial_dims") and isinstance(backbone.spatial_dims, int): + spatial_dims = backbone.spatial_dims + elif isinstance(backbone.conv1, nn.Conv2d): + spatial_dims = 2 + elif isinstance(backbone.conv1, nn.Conv3d): + spatial_dims = 3 + else: + raise ValueError("Could not find spatial_dims of backbone, please specify it.") + + self.fpn = Daf3dFPN(spatial_dims, in_channels_list, out_channels) diff --git a/monai/networks/blocks/feature_pyramid_network.py b/monai/networks/blocks/feature_pyramid_network.py index bd05814c3b..fa7147ca84 100644 --- a/monai/networks/blocks/feature_pyramid_network.py +++ b/monai/networks/blocks/feature_pyramid_network.py @@ -58,8 +58,8 @@ import torch.nn.functional as F from torch import Tensor, nn -from monai.networks.layers.factories import Conv, Pool from monai.networks.blocks.convolutions import Convolution +from monai.networks.layers.factories import Conv, Pool __all__ = ["ExtraFPNBlock", "LastLevelMaxPool", "LastLevelP6P7", "FeaturePyramidNetwork"] @@ -264,25 +264,33 @@ def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]: return out -class DAF3D_FPN(FeaturePyramidNetwork): + +class Daf3dFPN(FeaturePyramidNetwork): def __init__( - self, + self, spatial_dims: int, in_channels_list: list[int], out_channels: int, - extra_blocks: ExtraFPNBlock | None = None,): - + extra_blocks: ExtraFPNBlock | None = None, + ): super().__init__(spatial_dims, in_channels_list, out_channels, extra_blocks) self.inner_blocks = nn.ModuleList() for in_channels in in_channels_list: if in_channels == 0: raise ValueError("in_channels=0 is currently not supported") - inner_block_module = Convolution(spatial_dims, in_channels, out_channels, kernel_size=1, adn_ordering="NA", act="PRELU", norm=("group", {"num_groups": 32, "num_channels" : 128})) + inner_block_module = Convolution( + spatial_dims, + in_channels, + out_channels, + kernel_size=1, + adn_ordering="NA", + act="PRELU", + norm=("group", {"num_groups": 32, "num_channels": 128}), + ) self.inner_blocks.append(inner_block_module) def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]: - # unpack OrderedDict into two lists for easier handling names = list(x.keys()) x_values: list[Tensor] = list(x.values()) @@ -305,4 +313,4 @@ def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]: # make it back an OrderedDict out = OrderedDict(list(zip(names, results))) - return out \ No newline at end of file + return out diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index 4eb2a342e3..5b8b15d086 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -1,28 +1,58 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from __future__ import annotations + import torch import torch.nn as nn import torch.nn.functional as F +from monai.networks.blocks.aspp import Daf3dASPP +from monai.networks.blocks.backbone_fpn_utils import Daf3dBackboneWithFPN from monai.networks.blocks.convolutions import Convolution -from monai.networks.blocks.backbone_fpn_utils import DAF3D_BackboneWithFPN -from monai.networks.blocks.aspp import DAF3D_ASPP -from monai.networks.nets.resnet import DAF3D_ResNet, DAF3D_ResNetBottleneck +from monai.networks.nets.resnet import Daf3dResNet, Daf3dResNetBottleneck + +__all__ = ["AttentionModule", "Daf3dBackbone", "DAF3D"] class AttentionModule(nn.Module): def __init__(self): super().__init__() - group_norm = ("group", {"num_groups": 32, "num_channels" : 64}) - + group_norm = ("group", {"num_groups": 32, "num_channels": 64}) + self.attentive_map = nn.Sequential( Convolution(spatial_dims=3, in_channels=192, out_channels=64, kernel_size=1, norm=group_norm, act="PRELU"), - Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU"), - Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, adn_ordering="A", act="SIGMOID") + Convolution( + spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU" + ), + Convolution( + spatial_dims=3, + in_channels=64, + out_channels=64, + kernel_size=3, + padding=1, + adn_ordering="A", + act="SIGMOID", + ), ) self.refine = nn.Sequential( Convolution(spatial_dims=3, in_channels=192, out_channels=64, kernel_size=1, norm=group_norm, act="PRELU"), - Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU"), - Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU"), + Convolution( + spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU" + ), + Convolution( + spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU" + ), ) def forward(self, slf, mlf): @@ -30,25 +60,26 @@ def forward(self, slf, mlf): out = self.refine(torch.cat((slf, att * mlf), 1)) return out -class DAF3D_Backbone(nn.Module): + +class Daf3dBackbone(nn.Module): def __init__(self): super().__init__() - net = DAF3D_ResNet(DAF3D_ResNetBottleneck, [3,4,6,3], [128,256,512,1024]) - net = list(net.children()) - self.layer0 = nn.Sequential(*net[:3]) + net = Daf3dResNet(Daf3dResNetBottleneck, [3, 4, 6, 3], [128, 256, 512, 1024]) + net_modules = list(net.children()) + self.layer0 = nn.Sequential(*net_modules[:3]) # the layer0 contains the first convolution, bn and relu - self.layer1 = nn.Sequential(*net[3:5]) + self.layer1 = nn.Sequential(*net_modules[3:5]) # the layer1 contains the first pooling and the first 3 bottle blocks - self.layer2 = net[5] + self.layer2 = net_modules[5] # the layer2 contains the second 4 bottle blocks - self.layer3 = net[6] + self.layer3 = net_modules[6] # the layer3 contains the media bottle blocks # with 6 in 50-layers and 23 in 
101-layers - self.layer4 = net[7] + self.layer4 = net_modules[7] # the layer4 contains the final 3 bottle blocks # according the backbone the next is avg-pooling and dense with num classes uints # but we don't use the final two layers in backbone networks - + def forward(self, x): layer0 = self.layer0(x) layer1 = self.layer1(layer0) @@ -58,34 +89,73 @@ def forward(self, x): return layer4 -class DAF3D_Monai(nn.Module): +class DAF3D(nn.Module): def __init__(self): super().__init__() - self.backbone_with_fpn = DAF3D_BackboneWithFPN(backbone=DAF3D_Backbone(), return_layers={"layer1" : "feat1", "layer2" : "feat2", "layer3" : "feat3", "layer4" : "feat4"}, in_channels_list=[256, 512, 1024, 2048], out_channels=128, spatial_dims=3) + self.backbone_with_fpn = Daf3dBackboneWithFPN( + backbone=Daf3dBackbone(), + return_layers={"layer1": "feat1", "layer2": "feat2", "layer3": "feat3", "layer4": "feat4"}, + in_channels_list=[256, 512, 1024, 2048], + out_channels=128, + spatial_dims=3, + ) self.predict1 = nn.Conv3d(128, 1, kernel_size=1) - group_norm = ("group", {"num_groups": 32, "num_channels" : 64}) - act_prelu = ("prelu", {"num_parameters": 1, "init" : 0.25}) + group_norm = ("group", {"num_groups": 32, "num_channels": 64}) + act_prelu = ("prelu", {"num_parameters": 1, "init": 0.25}) self.fuse = nn.Sequential( - Convolution(spatial_dims=3, in_channels=512, out_channels=64, kernel_size=1, adn_ordering="NA", norm=group_norm, act=act_prelu), - Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, adn_ordering="NA", padding=1, norm=group_norm, act=act_prelu), - Convolution(spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, adn_ordering="NA", padding=1, norm=group_norm, act=act_prelu) + Convolution( + spatial_dims=3, + in_channels=512, + out_channels=64, + kernel_size=1, + adn_ordering="NA", + norm=group_norm, + act=act_prelu, + ), + Convolution( + spatial_dims=3, + in_channels=64, + out_channels=64, + kernel_size=3, + adn_ordering="NA", + padding=1, + norm=group_norm, + act=act_prelu, + ), + Convolution( + spatial_dims=3, + in_channels=64, + out_channels=64, + kernel_size=3, + adn_ordering="NA", + padding=1, + norm=group_norm, + act=act_prelu, + ), ) self.attention = AttentionModule() self.refine = Convolution(3, 256, 64, kernel_size=1, adn_ordering="NA", norm=group_norm, act=act_prelu) self.predict2 = nn.Conv3d(64, 1, kernel_size=1) - self.aspp = DAF3D_ASPP(3, 64, 64, kernel_sizes=(3,3,3,3), dilations=((1,1,1), (1,6,6), (1,12,12), (1,18,18)), norm_type=group_norm, bias=True) - + self.aspp = Daf3dASPP( + 3, + 64, + 64, + kernel_sizes=(3, 3, 3, 3), + dilations=((1, 1, 1), (1, 6, 6), (1, 12, 12), (1, 18, 18)), + norm_type=group_norm, + bias=True, + ) def forward(self, x): - #layers from 1 - 4 - single_layer_features = [l for l in self.backbone_with_fpn(x).values()] - #first 4 supervised signals (slfs 1 - 4) + # layers from 1 - 4 + single_layer_features = list(self.backbone_with_fpn(x).values()) + # first 4 supervised signals (slfs 1 - 4) supervised1 = [self.predict1(slf) for slf in single_layer_features] mlf = self.fuse(torch.cat(single_layer_features, 1)) attentive_feature_maps = [self.attention(slf, mlf) for slf in single_layer_features] - #second 4 supervised signals (af 1 - 4) + # second 4 supervised signals (af 1 - 4) supervised2 = [self.predict2(af) for af in attentive_feature_maps] attentive_mlf = self.refine(torch.cat(attentive_feature_maps, 1)) aspp = self.aspp(attentive_mlf) @@ -93,7 +163,7 @@ def forward(self, x): if self.training: output = supervised1 + 
supervised2 + [supervised_final] - output = [F.interpolate(o, size=x.size()[2:], mode='trilinear') for o in output] + output = [F.interpolate(o, size=x.size()[2:], mode="trilinear") for o in output] else: - output = F.interpolate(supervised_final, size=x.size()[2:], mode='trilinear') - return output \ No newline at end of file + output = F.interpolate(supervised_final, size=x.size()[2:], mode="trilinear") + return output diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py index ba742c645e..ac7bc867b1 100644 --- a/monai/networks/nets/resnet.py +++ b/monai/networks/nets/resnet.py @@ -429,74 +429,65 @@ def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> return _resnet("resnet200", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs) -class DAF3D_ResNetBottleneck(ResNetBottleneck): +class Daf3dResNetBottleneck(ResNetBottleneck): expansion = 2 def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): - norm_type: Callable = Norm[Norm.GROUP, spatial_dims] conv_type: Callable = Conv[Conv.CONV, spatial_dims] - - #in case downsample uses batch norm, change to group norm + + # in case downsample uses batch norm, change to group norm if isinstance(downsample, nn.Sequential): downsample = nn.Sequential( - conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False), - norm_type(num_groups=32, num_channels=planes*self.expansion) + conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False), + norm_type(num_groups=32, num_channels=planes * self.expansion), ) super().__init__(in_planes, planes, 3, stride, downsample) - #change norm from batch to group norm + # change norm from batch to group norm self.bn1 = norm_type(num_groups=32, num_channels=planes) self.bn2 = norm_type(num_groups=32, num_channels=planes) self.bn3 = norm_type(num_groups=32, num_channels=planes * self.expansion) - #adapt one convolution which is implemented differently - self.conv2 = conv_type( - planes, - planes, - kernel_size=3, - padding=1, - stride=stride, - groups=32, - bias=False) - - #adapt activation function + # adapt one convolution which is implemented differently + self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False) + + # adapt activation function self.relu = nn.PReLU() -class DAF3D_ResNetDilatedBottleneck(DAF3D_ResNetBottleneck): + +class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck): def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): super().__init__(in_planes, planes, spatial_dims, stride, downsample) - #add dilation in second convolution + # add dilation in second convolution conv_type: Callable = Conv[Conv.CONV, spatial_dims] self.conv2 = conv_type( - planes, - planes, - kernel_size=3, - stride=stride, - padding=2, - dilation=2, - groups=32, - bias=False + planes, planes, kernel_size=3, stride=stride, padding=2, dilation=2, groups=32, bias=False ) -class DAF3D_ResNet(ResNet): - def __init__(self, block, layers, block_inplanes, shortcut_type='B'): + +class Daf3dResNet(ResNet): + def __init__(self, block, layers, block_inplanes, shortcut_type="B"): super().__init__(block, layers, block_inplanes, n_input_channels=1, num_classes=2, shortcut_type=shortcut_type) self.in_planes = 64 - #adapt first convolution - conv_type : Callable = Conv[Conv.CONV, 3] + # adapt first convolution + conv_type: Callable = Conv[Conv.CONV, 3] norm_type: Callable = Norm[Norm.GROUP, 3] - self.conv1 = 
conv_type(1, self.in_planes, kernel_size=7, stride=(1,2,2), padding=(3,3,3), bias=False) + self.conv1 = conv_type(1, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False) self.bn1 = norm_type(32, 64) self.relu = nn.PReLU() - - #adapt layers to our needs + + # adapt layers to our needs self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], 3, shortcut_type) self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], 3, shortcut_type, stride=(1, 2, 2)) - self.layer3 = self._make_layer(DAF3D_ResNetDilatedBottleneck, block_inplanes[2], layers[2], 3, shortcut_type, stride=1) - self.layer4 = self._make_layer(DAF3D_ResNetDilatedBottleneck, block_inplanes[3], layers[3], 3, shortcut_type, stride=1) \ No newline at end of file + self.layer3 = self._make_layer( + Daf3dResNetDilatedBottleneck, block_inplanes[2], layers[2], 3, shortcut_type, stride=1 + ) + self.layer4 = self._make_layer( + Daf3dResNetDilatedBottleneck, block_inplanes[3], layers[3], 3, shortcut_type, stride=1 + ) From a3864ed3c26b1018190ecec3bed7393a5fcae6a8 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Thu, 9 Feb 2023 11:15:05 +0100 Subject: [PATCH 06/68] unit test and greater input variability --- monai/networks/nets/__init__.py | 1 + monai/networks/nets/daf3d.py | 12 ++++---- monai/networks/nets/resnet.py | 6 ++-- tests/test_daf3d.py | 49 +++++++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+), 9 deletions(-) create mode 100644 tests/test_daf3d.py diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index 95ddad7842..977d6fead2 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -17,6 +17,7 @@ from .basic_unet import BasicUNet, BasicUnet, Basicunet, basicunet from .basic_unetplusplus import BasicUNetPlusPlus, BasicUnetPlusPlus, BasicunetPlusPlus, basicunetplusplus from .classifier import Classifier, Critic, Discriminator +from .daf3d import DAF3D from .densenet import ( DenseNet, Densenet, diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index 5b8b15d086..9aacdc255b 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -62,9 +62,9 @@ def forward(self, slf, mlf): class Daf3dBackbone(nn.Module): - def __init__(self): + def __init__(self, layers, block_inplanes, n_input_channels): super().__init__() - net = Daf3dResNet(Daf3dResNetBottleneck, [3, 4, 6, 3], [128, 256, 512, 1024]) + net = Daf3dResNet(Daf3dResNetBottleneck, layers, block_inplanes, n_input_channels) net_modules = list(net.children()) self.layer0 = nn.Sequential(*net_modules[:3]) # the layer0 contains the first convolution, bn and relu @@ -90,16 +90,16 @@ def forward(self, x): class DAF3D(nn.Module): - def __init__(self): + def __init__(self, in_channels, out_channels): super().__init__() self.backbone_with_fpn = Daf3dBackboneWithFPN( - backbone=Daf3dBackbone(), + backbone=Daf3dBackbone(layers=[3, 4, 6, 3], block_inplanes=[128, 256, 512, 1024], n_input_channels=in_channels), return_layers={"layer1": "feat1", "layer2": "feat2", "layer3": "feat3", "layer4": "feat4"}, in_channels_list=[256, 512, 1024, 2048], out_channels=128, spatial_dims=3, ) - self.predict1 = nn.Conv3d(128, 1, kernel_size=1) + self.predict1 = nn.Conv3d(128, out_channels, kernel_size=1) group_norm = ("group", {"num_groups": 32, "num_channels": 64}) act_prelu = ("prelu", {"num_parameters": 1, "init": 0.25}) @@ -137,7 +137,7 @@ def __init__(self): self.attention = AttentionModule() self.refine = Convolution(3, 256, 64, kernel_size=1, 
adn_ordering="NA", norm=group_norm, act=act_prelu) - self.predict2 = nn.Conv3d(64, 1, kernel_size=1) + self.predict2 = nn.Conv3d(64, out_channels, kernel_size=1) self.aspp = Daf3dASPP( 3, 64, diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py index ac7bc867b1..d1c750e390 100644 --- a/monai/networks/nets/resnet.py +++ b/monai/networks/nets/resnet.py @@ -469,8 +469,8 @@ def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None) class Daf3dResNet(ResNet): - def __init__(self, block, layers, block_inplanes, shortcut_type="B"): - super().__init__(block, layers, block_inplanes, n_input_channels=1, num_classes=2, shortcut_type=shortcut_type) + def __init__(self, block, layers, block_inplanes, n_input_channels, shortcut_type="B"): + super().__init__(block, layers, block_inplanes, n_input_channels=n_input_channels, num_classes=2, shortcut_type=shortcut_type) self.in_planes = 64 @@ -478,7 +478,7 @@ def __init__(self, block, layers, block_inplanes, shortcut_type="B"): conv_type: Callable = Conv[Conv.CONV, 3] norm_type: Callable = Norm[Norm.GROUP, 3] - self.conv1 = conv_type(1, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False) + self.conv1 = conv_type(n_input_channels, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False) self.bn1 = norm_type(32, 64) self.relu = nn.PReLU() diff --git a/tests/test_daf3d.py b/tests/test_daf3d.py new file mode 100644 index 0000000000..14e2fdcf31 --- /dev/null +++ b/tests/test_daf3d.py @@ -0,0 +1,49 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import unittest + +import torch +from parameterized import parameterized + +from monai.networks import eval_mode +from monai.networks.nets import DAF3D +from tests.utils import test_script_save + +TEST_CASES = [ + [{"in_channels": 1, "out_channels": 1}, (1, 1, 32, 32, 64), (1, 1, 32, 32, 64)], # single channel 3D, batch 1 + [{"in_channels": 2, "out_channels": 1}, (3, 2, 32, 64, 128), (3, 1, 32, 64, 128)], # two channel 3D, batch 3 + [{"in_channels": 2, "out_channels": 2}, (3, 2, 32, 64, 128), (3, 2, 32, 64, 128)], # two channel 3D, same in & out channels + [{"in_channels": 4, "out_channels": 1}, (5, 4, 35, 35, 35), (5, 1, 35, 35, 35)], # four channel 3D, batch 5 + [{"in_channels": 4, "out_channels": 4}, (5, 4, 35, 35, 35), (5, 4, 35, 35, 35)] # four channel 3D, same in & out channels +] + + +class TestDAF3D(unittest.TestCase): + @parameterized.expand(TEST_CASES) + def test_shape(self, input_param, input_shape, expected_shape): + device = "cuda" if torch.cuda.is_available() else "cpu" + print(input_param) + net = DAF3D(**input_param).to(device) + with eval_mode(net): + result = net(torch.randn(input_shape).to(device)) + self.assertEqual(result.shape, expected_shape) + + def test_script(self): + net = DAF3D(in_channels=1, out_channels=1) + test_data = torch.randn(16, 1, 32, 32) + test_script_save(net, test_data) + + +if __name__ == "__main__": + unittest.main() From d86c80b69551b3949b01081f20dcfb593cb2cfcd Mon Sep 17 00:00:00 2001 From: Carlotta Date: Thu, 16 Feb 2023 11:03:06 +0100 Subject: [PATCH 07/68] quicknat network and quicknat test --- monai/networks/nets/quicknat.py | 226 ++++++++++++++++++++++++++++++++ tests/test_quicknat.py | 37 ++++++ 2 files changed, 263 insertions(+) create mode 100644 monai/networks/nets/quicknat.py create mode 100644 tests/test_quicknat.py diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py new file mode 100644 index 0000000000..8dc62246df --- /dev/null +++ b/monai/networks/nets/quicknat.py @@ -0,0 +1,226 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from squeeze_and_excitation import squeeze_and_excitation as se1 + +from monai.networks.blocks import Bottleneck, ClassifierBlock, ConvConcatDenseBlock, Decoder, Encoder +from monai.networks.blocks import squeeze_and_excitation as se +from monai.networks.layers.factories import Act, Norm +from monai.networks.layers.simplelayers import SkipConnectionWithIndices +from monai.networks.layers.utils import get_dropout_layer, get_pool_layer +from monai.utils import alias, export + + +@export("monai.networks.nets") +@alias("Quicknat") +class QuickNAT(nn.Module): + """ + Model for "Quick segmentation of NeuroAnaTomy (QuickNAT) based on a deep fully convolutional neural network. 
+    Refer to: "QuickNAT: A Fully Convolutional Network for Quick and Accurate Segmentation of Neuroanatomy by
+    Abhijit Guha Roy, Sailesh Conjeti, Nassir Navab, Christian Wachinger"
+
+    QuickNAT has an encoder/decoder-like 2D F-CNN architecture with 4 encoders and 4 decoders separated by a bottleneck layer.
+    The final layer is a classifier block with softmax.
+    The architecture includes skip connections between all encoder and decoder blocks of the same spatial resolution,
+    similar to the U-Net architecture.
+    All encoder and decoder blocks consist of three convolutional layers, each with batch normalization and ReLU.
+    The first two convolutional layers are followed by a concatenation layer that concatenates
+    the input feature map with the outputs of the current and previous convolutional blocks.
+    The kernel size of the first two convolutional layers is 5*5, the third convolutional layer has a kernel size of 1*1.
+
+    Data in the encode path is downsampled using max pooling layers instead of strided convolutions as in UNet,
+    and in the decode path upsampled using max un-pooling layers instead of transpose convolutions.
+    The pooling is done at the beginning of the block and the un-pooling afterwards.
+    The indices of the max pooling in the encoder are forwarded through the layer to be available to the corresponding decoder.
+
+    The bottleneck block consists of a 5 * 5 convolutional layer and a batch normalization layer
+    to separate the encoder and decoder part of the network,
+    restricting information flow between the encoder and decoder.
+
+    The output feature map from the last decoder block is passed to the classifier block,
+    which is a convolutional layer with a 1 * 1 kernel size that maps the input to an N channel feature map,
+    where N is the number of segmentation classes.
+
+    To further explain this, consider the first example network given below. This network has 3 layers with strides
+    of 2 for each of the middle layers (the last layer is the bottom connection which does not down/up sample). Input
+    data to this network is immediately reduced in the spatial dimensions by a factor of 2 by the first convolution of
+    the residual unit defining the first layer of the encode part. The last layer of the decode part will upsample its
+    input (data from the previous layer concatenated with data from the skip connection) in the first convolution. This
+    ensures the final output of the network has the same shape as the input.
+
+    Args:
+        num_classes: number of classes to segment (output channels).
+        num_channels: number of input channels.
+        num_filters: number of output channels for each convolutional layer in a Dense Block.
+        kernel_size: size of the kernel of each convolutional layer in a Dense Block.
+        kernel_c: convolution kernel size of the classifier block.
+        stride_conv: convolution stride. Defaults to 1.
+        pool: kernel size of the pooling layer.
+        stride_pool: stride for the pooling layer.
+        se_block: Squeeze and Excite block type to be included. Defaults to None. Valid options: NONE, CSE, SSE, CSSE.
+        drop_out: dropout ratio. Defaults to no dropout.
+        act: activation type and arguments. Defaults to PReLU.
+        norm: feature normalization type and arguments. Defaults to instance norm.
+        adn_ordering: a string representing the ordering of activation (A), normalization (N), and dropout (D).
+            Defaults to "NA". See also: :py:class:`monai.networks.blocks.ADN`.
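+
+    Note:
+        The SSE and CSSE options rely on the external ``squeeze_and_excitation`` package
+        imported above, since MONAI itself currently provides only the channel SE block
+        used for the CSE option.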
+
+    Examples::
+
+        from monai.networks.nets import QuickNAT
+
+        # network with max pooling by a factor of 2 at each layer with no se_block.
+        net = QuickNAT(
+            num_classes=3,
+            num_channels=1,
+            num_filters=64,
+            pool=2,
+            se_block="None"
+        )
+
+    """
+
+    def __init__(
+        self,
+        num_classes: int = 33,
+        num_channels: int = 1,
+        num_filters: int = 64,
+        kernel_size: int = (5, 5),
+        kernel_c: int = 1,
+        stride_conv: int = 1,
+        pool: int = 2,
+        stride_pool: int = 2,
+        # Valid options: NONE, CSE, SSE, CSSE
+        se_block: str = "None",
+        drop_out: float = 0,
+        act: Union[Tuple, str] = Act.PRELU,
+        norm: Union[Tuple, str] = Norm.INSTANCE,
+        adn_ordering: str = "NA",
+    ) -> None:
+        self.act = act
+        self.norm = norm
+        self.adn_ordering = adn_ordering
+        super(QuickNAT, self).__init__()
+        se_layer = self.get_selayer(num_filters, se_block)
+        dropout_layer = get_dropout_layer(name=("dropout", {"p": drop_out}), dropout_dim=2)
+        max_pool = get_pool_layer(
+            name=("max", {"kernel_size": pool, "stride": stride_pool, "return_indices": True, "ceil_mode": True}),
+            spatial_dims=2,
+        )
+        # for the unpooling layer there is currently no MONAI implementation available, so fall back to the torch implementation
+        un_pool = nn.MaxUnpool2d(kernel_size=pool, stride=stride_pool)
+
+        # a sequence of convolutional strides (as in UNet) is not needed, since every convolution uses stride_conv (defaults to 1).
+        def _create_model(layer: int) -> nn.Module:
+            """
+            Builds the QuickNAT structure from the bottom up by recursing down to the bottleneck layer, then creating sequential
+            blocks containing the decoder, a skip connection around the previous block, and the encoder.
+            At the last layer a classifier block is added to the Sequential.
+
+            Args:
+                layer: inversely proportional to the number of layers left to create.
+            """
+            subblock: nn.Module
+            if layer < 4:
+                subblock = _create_model(layer + 1)
+
+            else:
+                subblock = Bottleneck(num_filters, se_layer, dropout_layer, max_pool, un_pool, kernel_size, num_filters)
+
+            if layer == 1:
+                down = ConvConcatDenseBlock(num_channels, se_layer, dropout_layer, kernel_size, num_filters)
+                up = ConvConcatDenseBlock(num_filters * 2, self.SELayer, dropout_layer, kernel_size, num_filters)
+                classifier = ClassifierBlock(2, num_filters, num_classes, stride_conv, kernel_c)
+                return MySequential(down, SkipConnectionWithIndices(subblock), up, classifier)
+            else:
+                up = Decoder(num_filters * 2, un_pool, self.SELayer, dropout_layer, kernel_size, num_filters)
+                down = Encoder(num_filters, max_pool, self.SELayer, dropout_layer, kernel_size, num_filters)
+                return MySequential(down, SkipConnectionWithIndices(subblock), up)
+
+        self.model = _create_model(1)
+
+    def get_selayer(self, n_filters, se_block_type = "None"):
+        """
+        Returns the SEBlock defined in the initialization of the QuickNAT model.
+
+        Args:
+            n_filters: encoding half of the layer
+            se_block_type: defaults to None. Valid options are None, CSE, SSE, CSSE
+        Returns: Appropriate SEBlock. SSE and CSSE not implemented in Monai yet.
+ """ + if se_block_type == "CSE": + self.SELayer = se.ChannelSELayer(2, n_filters) + # not implemented in squeeze_and_excitation in monai + elif se_block_type == "SSE": + self.SELayer = se1.SpatialSELayer(n_filters) + + elif se_block_type == "CSSE": + # not implemented in monai + self.SELayer = se1.ChannelSpatialSELayer(n_filters) + else: + self.SELayer = None + # TODO: Do I include this: + def enable_test_dropout(self): + """ + Enables test time drop out for uncertainity + :return: + """ + attr_dict = self.__dict__["_modules"] + for i in range(1, 5): + encode_block, decode_block = (attr_dict["encode" + str(i)], attr_dict["decode" + str(i)]) + encode_block.drop_out = encode_block.drop_out.apply(nn.Module.train) + decode_block.drop_out = decode_block.drop_out.apply(nn.Module.train) + + @property + def is_cuda(self): + """ + Check if model parameters are allocated on the GPU. + """ + return next(self.parameters()).is_cuda + + def forward(self, input: torch.Tensor) -> torch.Tensor: + input, _ = self.model(input, None) + return input + + # TODO do I include this: + def save(self, path): + """ + Save model with its parameters to the given path. Conventionally the + path should end with '*.model'. + + Inputs: + - path: path string + """ + print("Saving model... %s" % path) + torch.save(self.state_dict(), path) + + +class MySequential(nn.Sequential): + """ + A sequential container. + Modules will be added to it in the order they are passed in the + constructor. + Own implementation to work with the new indices in the forward pass. + """ + + def __init__(self, *args): + super().__init__(*args) + + def forward(self, input, indices): + for module in self: + input, indices = module(input, indices) + return input, indices diff --git a/tests/test_quicknat.py b/tests/test_quicknat.py new file mode 100644 index 0000000000..154876a7ff --- /dev/null +++ b/tests/test_quicknat.py @@ -0,0 +1,37 @@ +from __future__ import annotations +import unittest +import test_torchscript_utils +from parameterized import parameterized + +from monai.networks import eval_mode +from monai.networks.nets import Quicknat +from tests.utils import test_script_save + +TEST_CASES = [ + # params, input_shape, expected_shape + [{"num_classes": 1,"num_channels": 1, "num_filters": 1, "se_block" : None}, (), ()], + [{"num_classes": 1,"num_channels": 1, "num_filters": 4, "se_block" : None}, (), ()], + [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : None}, (), ()], + [{"num_classes": 4,"num_channels": 1, "num_filters": 64, "se_block" : None}, (), ()], + [{"num_classes": 33,"num_channels": 1, "num_filters": 64, "se_block" : None}, (), ()], + [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : "CSE"}, (), ()], + [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : "SSE"}, (), ()], + [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : "CSSE"}, (), ()] +] + +class TestQuicknat(unittest.TestCase): + @parameterized.expand(TEST_CASES) + def test_shape(self, input_param, input_shape, expected_shape): + device = "cuda" if torch.cuda.is_available() else "cpu" + print(input_param) + net = Quicknat(**input_param).to(device) + with eval_mode(net): + result = net(torch.randn(input_shape).to(device)) + self.assertEqual(result.shape, expected_shape) + def test_script(self): + net = Quicknat(in_channels = 1, out_channels = 1) + test_data = torch.randn(16,1,32,32) + test_script_save(net, test_data) + +if __name__ == "__main__": + unittest.main() \ No newline at end of file From 
3627c8eba6f88983860837f6bd7dce9f0cb93b2e Mon Sep 17 00:00:00 2001 From: Carlotta Date: Fri, 17 Feb 2023 09:55:17 +0100 Subject: [PATCH 08/68] quicknat network and quicknat test --- monai/networks/nets/quicknat.py | 53 +++++++++++++------------------- tests/test_quicknat.py | 54 +++++++++++++++++++++------------ 2 files changed, 56 insertions(+), 51 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 8dc62246df..195ac96bf2 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -11,9 +11,8 @@ from __future__ import annotations -from typing import Tuple, Union +from typing import Sequence, Tuple, Union -import numpy as np import torch import torch.nn as nn from squeeze_and_excitation import squeeze_and_excitation as se1 @@ -21,14 +20,15 @@ from monai.networks.blocks import Bottleneck, ClassifierBlock, ConvConcatDenseBlock, Decoder, Encoder from monai.networks.blocks import squeeze_and_excitation as se from monai.networks.layers.factories import Act, Norm -from monai.networks.layers.simplelayers import SkipConnectionWithIndices +from monai.networks.layers.simplelayers import SkipConnectionWithIdx from monai.networks.layers.utils import get_dropout_layer, get_pool_layer -from monai.utils import alias, export +from monai.utils import export + +__all__ = ["Quicknat"] @export("monai.networks.nets") -@alias("Quicknat") -class QuickNAT(nn.Module): +class Quicknat(nn.Module): """ Model for "Quick segmentation of NeuroAnaTomy (QuickNAT) based on a deep fully convolutional neural network. Refer to: "QuickNAT: A Fully Convolutional Network for Quick and Accurate Segmentation of Neuroanatomy by @@ -99,7 +99,7 @@ def __init__( num_classes: int = 33, num_channels: int = 1, num_filters: int = 64, - kernel_size: int = (5, 5), + kernel_size: Sequence[int] | int = 5, kernel_c: int = 1, stride_conv: int = 1, pool: int = 2, @@ -114,7 +114,7 @@ def __init__( self.act = act self.norm = norm self.adn_ordering = adn_ordering - super(QuickNAT, self).__init__() + super().__init__() se_layer = self.get_selayer(num_filters, se_block) dropout_layer = get_dropout_layer(name=("dropout", {"p": drop_out}), dropout_dim=2) max_pool = get_pool_layer( @@ -143,17 +143,17 @@ def _create_model(layer: int) -> nn.Module: if layer == 1: down = ConvConcatDenseBlock(num_channels, se_layer, dropout_layer, kernel_size, num_filters) - up = ConvConcatDenseBlock(num_filters * 2, self.SELayer, dropout_layer, kernel_size, num_filters) + up = ConvConcatDenseBlock(num_filters * 2, se_layer, dropout_layer, kernel_size, num_filters) classifier = ClassifierBlock(2, num_filters, num_classes, stride_conv, kernel_c) - return MySequential(down, SkipConnectionWithIndices(subblock), up, classifier) + return SequentialWithIdx(down, SkipConnectionWithIdx(subblock), up, classifier) else: - up = Decoder(num_filters * 2, un_pool, self.SELayer, dropout_layer, kernel_size, num_filters) - down = Encoder(num_filters, max_pool, self.SELayer, dropout_layer, kernel_size, num_filters) - return MySequential(down, SkipConnectionWithIndices(subblock), up) + up = Decoder(num_filters * 2, un_pool, se_layer, dropout_layer, kernel_size, num_filters) + down = Encoder(num_filters, max_pool, se_layer, dropout_layer, kernel_size, num_filters) + return SequentialWithIdx(down, SkipConnectionWithIdx(subblock), up) self.model = _create_model(1) - def get_selayer(self, n_filters, se_block_type = "None"): + def get_selayer(self, n_filters, se_block_type="None"): """ Returns the SEBlock defined in the 
initialization of the QuickNAT model. @@ -163,17 +163,18 @@ def get_selayer(self, n_filters, se_block_type = "None"): Returns: Appropriate SEBlock. SSE and CSSE not implemented in Monai yet. """ if se_block_type == "CSE": - self.SELayer = se.ChannelSELayer(2, n_filters) + return se.ChannelSELayer(2, n_filters) # not implemented in squeeze_and_excitation in monai elif se_block_type == "SSE": - self.SELayer = se1.SpatialSELayer(n_filters) + return se1.SpatialSELayer(n_filters) elif se_block_type == "CSSE": # not implemented in monai - self.SELayer = se1.ChannelSpatialSELayer(n_filters) + return se1.ChannelSpatialSELayer(n_filters) else: - self.SELayer = None - # TODO: Do I include this: + return None + + # TODO: Do I include this: def enable_test_dropout(self): """ Enables test time drop out for uncertainity @@ -195,21 +196,9 @@ def is_cuda(self): def forward(self, input: torch.Tensor) -> torch.Tensor: input, _ = self.model(input, None) return input - - # TODO do I include this: - def save(self, path): - """ - Save model with its parameters to the given path. Conventionally the - path should end with '*.model'. - - Inputs: - - path: path string - """ - print("Saving model... %s" % path) - torch.save(self.state_dict(), path) -class MySequential(nn.Sequential): +class SequentialWithIdx(nn.Sequential): """ A sequential container. Modules will be added to it in the order they are passed in the diff --git a/tests/test_quicknat.py b/tests/test_quicknat.py index 154876a7ff..7f6f180597 100644 --- a/tests/test_quicknat.py +++ b/tests/test_quicknat.py @@ -1,37 +1,53 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
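+
+# A minimal usage sketch mirroring the cases below (shapes are illustrative); the network
+# maps (batch, num_channels, H, W) inputs to (batch, num_classes, H, W) logits:
+#
+#     net = Quicknat(num_classes=4, num_channels=1)
+#     seg = net(torch.randn(1, 1, 32, 32))  # -> (1, 4, 32, 32)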
+ from __future__ import annotations + import unittest -import test_torchscript_utils -from parameterized import parameterized + +import torch +from parameterized import parameterized from monai.networks import eval_mode -from monai.networks.nets import Quicknat +from monai.networks.nets import Quicknat from tests.utils import test_script_save TEST_CASES = [ - # params, input_shape, expected_shape - [{"num_classes": 1,"num_channels": 1, "num_filters": 1, "se_block" : None}, (), ()], - [{"num_classes": 1,"num_channels": 1, "num_filters": 4, "se_block" : None}, (), ()], - [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : None}, (), ()], - [{"num_classes": 4,"num_channels": 1, "num_filters": 64, "se_block" : None}, (), ()], - [{"num_classes": 33,"num_channels": 1, "num_filters": 64, "se_block" : None}, (), ()], - [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : "CSE"}, (), ()], - [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : "SSE"}, (), ()], - [{"num_classes": 1,"num_channels": 1, "num_filters": 64, "se_block" : "CSSE"}, (), ()] + # params, input_shape, expected_shape + [{"num_classes": 1, "num_channels": 1, "num_filters": 1, "se_block": None}, (1, 1, 32, 32), (1, 1, 32, 32)], + [{"num_classes": 1, "num_channels": 1, "num_filters": 4, "se_block": None}, (1, 1, 64, 64), (1, 1, 64, 64)], + [{"num_classes": 1, "num_channels": 1, "num_filters": 64, "se_block": None}, (1, 1, 128, 128), (1, 1, 128, 128)], + [{"num_classes": 4, "num_channels": 1, "num_filters": 64, "se_block": None}, (1, 1, 32, 32), (1, 4, 32, 32)], + [{"num_classes": 33, "num_channels": 1, "num_filters": 64, "se_block": None}, (1, 1, 32, 32), (1, 33, 32, 32)], + [{"num_classes": 1, "num_channels": 1, "num_filters": 64, "se_block": "CSE"}, (1, 1, 32, 32), (1, 1, 32, 32)], + [{"num_classes": 1, "num_channels": 1, "num_filters": 64, "se_block": "SSE"}, (1, 1, 32, 32), (1, 1, 32, 32)], + [{"num_classes": 1, "num_channels": 1, "num_filters": 64, "se_block": "CSSE"}, (1, 1, 32, 32), (1, 1, 32, 32)], ] -class TestQuicknat(unittest.TestCase): + +class TestQuicknat(unittest.TestCase): @parameterized.expand(TEST_CASES) - def test_shape(self, input_param, input_shape, expected_shape): + def test_shape(self, input_param, input_shape, expected_shape): device = "cuda" if torch.cuda.is_available() else "cpu" print(input_param) net = Quicknat(**input_param).to(device) - with eval_mode(net): + with eval_mode(net): result = net(torch.randn(input_shape).to(device)) self.assertEqual(result.shape, expected_shape) - def test_script(self): - net = Quicknat(in_channels = 1, out_channels = 1) - test_data = torch.randn(16,1,32,32) + + def test_script(self): + net = Quicknat(num_classes=1, num_channels=1) + test_data = torch.randn(16, 1, 32, 32) test_script_save(net, test_data) + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() From df7a97c1476c2672f2d787a489fd34f5e9d0c5c3 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Mon, 20 Feb 2023 13:29:27 +0100 Subject: [PATCH 09/68] reset to non-variable blocks, not all format tests passing yet --- monai/networks/blocks/aspp.py | 47 ++++- monai/networks/blocks/backbone_fpn_utils.py | 23 ++- .../blocks/feature_pyramid_network.py | 19 +- monai/networks/nets/daf3d.py | 144 +++++++++----- monai/networks/nets/resnet.py | 176 +++++++++++++----- tests/test_daf3d.py | 12 +- 6 files changed, 319 insertions(+), 102 deletions(-) diff --git a/monai/networks/blocks/aspp.py b/monai/networks/blocks/aspp.py index 
97b3b8fdbd..dad08cb75e 100644 --- a/monai/networks/blocks/aspp.py +++ b/monai/networks/blocks/aspp.py @@ -109,19 +109,51 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class Daf3dASPP(SimpleASPP): + """ + Atrous Spatial Pyramid Pooling module as used in 'Deep Attentive Features for Prostate Segmentation in + 3D Transrectal Ultrasound' . Core functionality as in SimpleASPP, but after each + layerwise convolution a group normalization is added. Further weight initialization for convolutions is provided in + _init_weight(). Additional possibility to specify the number of final output channels. + + Args: + spatial_dims: number of spatial dimensions, could be 1, 2, or 3. + in_channels: number of input channels. + conv_out_channels: number of output channels of each atrous conv. + out_channels: number of output channels of final convolution. + If None, uses len(kernel_sizes) * conv_out_channels + kernel_sizes: a sequence of four convolutional kernel sizes. + Defaults to (1, 3, 3, 3) for four (dilated) convolutions. + dilations: a sequence of four convolutional dilation parameters. + Defaults to (1, 2, 4, 6) for four (dilated) convolutions. + norm_type: final kernel-size-one convolution normalization type. + Defaults to batch norm. + acti_type: final kernel-size-one convolution activation type. + Defaults to leaky ReLU. + bias: whether to have a bias term in convolution blocks. Defaults to False. + According to `Performance Tuning Guide `_, + if a conv layer is directly followed by a batch norm layer, bias should be False. + + Raises: + ValueError: When ``kernel_sizes`` length differs from ``dilations``. + """ + def __init__( self, spatial_dims: int, in_channels: int, conv_out_channels: int, + out_channels: int | None = None, kernel_sizes: Sequence[int] = (1, 3, 3, 3), dilations: Sequence[int] = (1, 2, 4, 6), norm_type: tuple | str | None = "BATCH", + acti_type: tuple | str | None = "LEAKYRELU", bias: bool = False, ) -> None: - super().__init__(spatial_dims, in_channels, conv_out_channels, kernel_sizes, dilations, norm_type, bias=bias) + super().__init__( + spatial_dims, in_channels, conv_out_channels, kernel_sizes, dilations, norm_type, acti_type, bias + ) - # change convolutions in self.convs so they fit our needs + # add normalization after each atrous convolution, initializes weights new_convs = nn.ModuleList() for _conv in self.convs: tmp_conv = Convolution(1, 1, 1) @@ -131,14 +163,17 @@ def __init__( new_convs.append(tmp_conv) self.convs = new_convs - # change final convolution + # change final convolution to different out_channels + if out_channels is None: + out_channels = len(kernel_sizes) * conv_out_channels + self.conv_k1 = Convolution( spatial_dims=3, - in_channels=4 * in_channels, - out_channels=conv_out_channels, + in_channels=len(kernel_sizes) * conv_out_channels, + out_channels=out_channels, kernel_size=1, - adn_ordering="N", norm=norm_type, + act=acti_type, ) def _init_weight(self, conv): diff --git a/monai/networks/blocks/backbone_fpn_utils.py b/monai/networks/blocks/backbone_fpn_utils.py index 9f4864e5ab..19dc165a0a 100644 --- a/monai/networks/blocks/backbone_fpn_utils.py +++ b/monai/networks/blocks/backbone_fpn_utils.py @@ -61,7 +61,7 @@ torchvision_models, _ = optional_import("torchvision.models") -__all__ = ["BackboneWithFPN"] +__all__ = ["BackboneWithFPN", "Daf3dBackboneWithFPN"] class BackboneWithFPN(nn.Module): @@ -176,6 +176,25 @@ def _resnet_fpn_extractor( class Daf3dBackboneWithFPN(BackboneWithFPN): + """ + Same as BackboneWithFPN but uses custom Daf3DFPN as 
feature pyramid network.
+
+    Args:
+        backbone: backbone network.
+        return_layers: a dict containing the names
+            of the modules for which the activations will be returned as
+            the key of the dict, and the value of the dict is the name
+            of the returned activation (which the user can specify).
+        in_channels_list: number of channels for each feature map
+            that is returned, in the order they are present in the OrderedDict.
+        out_channels: number of channels in the FPN.
+        spatial_dims: number of spatial dimensions of the input images (2 or 3).
+        extra_blocks: if provided, extra operations will
+            be performed. It is expected to take the fpn features, the original
+            features and the names of the original features as input, and returns
+            a new list of feature maps and their corresponding names.
+    """
+
     def __init__(
         self,
         backbone: nn.Module,
@@ -197,4 +216,4 @@ def __init__(
         else:
             raise ValueError("Could not find spatial_dims of backbone, please specify it.")
 
-        self.fpn = Daf3dFPN(spatial_dims, in_channels_list, out_channels)
+        self.fpn = Daf3dFPN(spatial_dims, in_channels_list, out_channels, extra_blocks)
diff --git a/monai/networks/blocks/feature_pyramid_network.py b/monai/networks/blocks/feature_pyramid_network.py
index fa7147ca84..b0002971a2 100644
--- a/monai/networks/blocks/feature_pyramid_network.py
+++ b/monai/networks/blocks/feature_pyramid_network.py
@@ -61,7 +61,7 @@
 from monai.networks.blocks.convolutions import Convolution
 from monai.networks.layers.factories import Conv, Pool
 
-__all__ = ["ExtraFPNBlock", "LastLevelMaxPool", "LastLevelP6P7", "FeaturePyramidNetwork"]
+__all__ = ["ExtraFPNBlock", "LastLevelMaxPool", "LastLevelP6P7", "FeaturePyramidNetwork", "Daf3dFPN"]
 
@@ -266,6 +266,22 @@ def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]:
 
 
 class Daf3dFPN(FeaturePyramidNetwork):
+    """
+    Feature Pyramid Network as used in 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound'.
+    Omits the 3x3x3 convolution of layer_blocks and interpolates the resulting feature maps to the size of the
+    feature map with the highest resolution.
+
+    Args:
+        spatial_dims: number of spatial dimensions of the input images (2 or 3).
+        in_channels_list: number of channels for each feature map that is passed to the module.
+        out_channels: number of channels of the FPN representation.
+        extra_blocks: if provided, extra operations will be performed.
+ It is expected to take the fpn features, the original + features and the names of the original features as input, and returns + a new list of feature maps and their corresponding names + """ + def __init__( self, spatial_dims: int, @@ -309,6 +325,7 @@ def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]: if self.extra_blocks is not None: results, names = self.extra_blocks(results, x_values, names) + # bring all layers to same size results = [results[0]] + [F.interpolate(l, size=x["feat1"].size()[2:], mode="trilinear") for l in results[1:]] # make it back an OrderedDict out = OrderedDict(list(zip(names, results))) diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index 9aacdc255b..06dcb48d87 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -19,66 +19,77 @@ from monai.networks.blocks.aspp import Daf3dASPP from monai.networks.blocks.backbone_fpn_utils import Daf3dBackboneWithFPN from monai.networks.blocks.convolutions import Convolution -from monai.networks.nets.resnet import Daf3dResNet, Daf3dResNetBottleneck +from monai.networks.nets.resnet import Daf3dResNet __all__ = ["AttentionModule", "Daf3dBackbone", "DAF3D"] class AttentionModule(nn.Module): - def __init__(self): + """ + Attention Module as described in 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' + . Returns refined single layer feature (SLF) and attentive map + + Args: + spatial_dims: dimension of inputs. + in_channels: number of input channels (channels of slf and mlf). + out_channels: number of output channels (channels of attentive map and refined slf). + norm: normalization type. + act: activation type. + """ + + def __init__( + self, + spatial_dims, + in_channels, + out_channels, + norm=("group", {"num_groups": 32, "num_channels": 64}), + act="PRELU", + ): super().__init__() - group_norm = ("group", {"num_groups": 32, "num_channels": 64}) - self.attentive_map = nn.Sequential( - Convolution(spatial_dims=3, in_channels=192, out_channels=64, kernel_size=1, norm=group_norm, act="PRELU"), + Convolution(spatial_dims, in_channels, out_channels, kernel_size=1, norm=norm, act=act), + Convolution(spatial_dims, out_channels, out_channels, kernel_size=3, padding=1, norm=norm, act=act), Convolution( - spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU" - ), - Convolution( - spatial_dims=3, - in_channels=64, - out_channels=64, - kernel_size=3, - padding=1, - adn_ordering="A", - act="SIGMOID", + spatial_dims, out_channels, out_channels, kernel_size=3, padding=1, adn_ordering="A", act="SIGMOID" ), ) self.refine = nn.Sequential( - Convolution(spatial_dims=3, in_channels=192, out_channels=64, kernel_size=1, norm=group_norm, act="PRELU"), - Convolution( - spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU" - ), - Convolution( - spatial_dims=3, in_channels=64, out_channels=64, kernel_size=3, padding=1, norm=group_norm, act="PRELU" - ), + Convolution(spatial_dims, in_channels, out_channels, kernel_size=1, norm=norm, act=act), + Convolution(spatial_dims, out_channels, out_channels, kernel_size=3, padding=1, norm=norm, act=act), + Convolution(spatial_dims, out_channels, out_channels, kernel_size=3, padding=1, norm=norm, act=act), ) def forward(self, slf, mlf): att = self.attentive_map(torch.cat((slf, mlf), 1)) out = self.refine(torch.cat((slf, att * mlf), 1)) - return out + return (out, att) class Daf3dBackbone(nn.Module): - def __init__(self, 
layers, block_inplanes, n_input_channels): +    """ +    Backbone for 3D Feature Pyramid Network in DAF3D module based on 'Deep Attentive Features for Prostate Segmentation in +    3D Transrectal Ultrasound' . + +    Args: +        n_input_channels: number of input channels for the first convolution. +    """ + +    def __init__(self, n_input_channels):         super().__init__() -        net = Daf3dResNet(Daf3dResNetBottleneck, layers, block_inplanes, n_input_channels) +        net = Daf3dResNet( +            layers=[3, 4, 6, 3], +            block_inplanes=[128, 256, 512, 1024], +            n_input_channels=n_input_channels, +            num_classes=2, +            bias_downsample=False, +        )         net_modules = list(net.children())         self.layer0 = nn.Sequential(*net_modules[:3]) -        # the layer0 contains the first convolution, bn and relu         self.layer1 = nn.Sequential(*net_modules[3:5]) -        # the layer1 contains the first pooling and the first 3 bottle blocks         self.layer2 = net_modules[5] -        # the layer2 contains the second 4 bottle blocks         self.layer3 = net_modules[6] -        # the layer3 contains the media bottle blocks -        # with 6 in 50-layers and 23 in 101-layers         self.layer4 = net_modules[7] -        # the layer4 contains the final 3 bottle blocks -        # according the backbone the next is avg-pooling and dense with num classes uints -        # but we don't use the final two layers in backbone networks      def forward(self, x):         layer0 = self.layer0(x) @@ -90,10 +101,30 @@ def forward(self, x):   class DAF3D(nn.Module): -    def __init__(self, in_channels, out_channels): +    """ +    DAF3D network based on 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' +    . +    The network consists of a 3D Feature Pyramid Network which is applied on the feature maps of a 3D ResNet, +    followed by a custom Attention Module and an ASPP module. +    During training the supervised signal consists of the outputs of the FPN (four Single Layer Features, SLFs), +    the outputs of the attention module (four Attentive Features) and the final prediction. +    They are individually compared to the ground truth; the final loss consists of a weighted sum of all +    individual losses (see DAF3D tutorial for details). +    There is an additional possibility to return all supervised signals as well as the Attentive Maps in validation +    mode to visualize the inner functionality of the network. + +    Args: +        in_channels: number of input channels. +        out_channels: number of output channels.
+ visual_output: whether to return all SLFs, Attentive Maps, Refined SLFs in validation mode + can be used to visualize inner functionality of the network + """ + + def __init__(self, in_channels, out_channels, visual_output=False): super().__init__() + self.visual_output = visual_output self.backbone_with_fpn = Daf3dBackboneWithFPN( - backbone=Daf3dBackbone(layers=[3, 4, 6, 3], block_inplanes=[128, 256, 512, 1024], n_input_channels=in_channels), + backbone=Daf3dBackbone(in_channels), return_layers={"layer1": "feat1", "layer2": "feat2", "layer3": "feat3", "layer4": "feat4"}, in_channels_list=[256, 512, 1024, 2048], out_channels=128, @@ -134,36 +165,59 @@ def __init__(self, in_channels, out_channels): act=act_prelu, ), ) - self.attention = AttentionModule() + self.attention = AttentionModule( + spatial_dims=3, in_channels=192, out_channels=64, norm=group_norm, act=act_prelu + ) self.refine = Convolution(3, 256, 64, kernel_size=1, adn_ordering="NA", norm=group_norm, act=act_prelu) self.predict2 = nn.Conv3d(64, out_channels, kernel_size=1) self.aspp = Daf3dASPP( - 3, - 64, - 64, + spatial_dims=3, + in_channels=64, + conv_out_channels=64, + out_channels=64, kernel_sizes=(3, 3, 3, 3), dilations=((1, 1, 1), (1, 6, 6), (1, 12, 12), (1, 18, 18)), norm_type=group_norm, + acti_type=None, bias=True, ) def forward(self, x): # layers from 1 - 4 single_layer_features = list(self.backbone_with_fpn(x).values()) - # first 4 supervised signals (slfs 1 - 4) + + # first 4 supervised signals (SLFs 1 - 4) supervised1 = [self.predict1(slf) for slf in single_layer_features] + mlf = self.fuse(torch.cat(single_layer_features, 1)) - attentive_feature_maps = [self.attention(slf, mlf) for slf in single_layer_features] + + attentive_features_maps = [self.attention(slf, mlf) for slf in single_layer_features] + att_features, att_maps = zip(*attentive_features_maps) + # second 4 supervised signals (af 1 - 4) - supervised2 = [self.predict2(af) for af in attentive_feature_maps] - attentive_mlf = self.refine(torch.cat(attentive_feature_maps, 1)) + supervised2 = [self.predict2(af) for af in att_features] + + # attentive maps as optional additional output + supervised3 = [self.predict2(am) for am in att_maps] + + attentive_mlf = self.refine(torch.cat(att_features, 1)) + aspp = self.aspp(attentive_mlf) + supervised_final = self.predict2(aspp) if self.training: - output = supervised1 + supervised2 + [supervised_final] + output = *supervised1, *supervised2, supervised_final output = [F.interpolate(o, size=x.size()[2:], mode="trilinear") for o in output] else: - output = F.interpolate(supervised_final, size=x.size()[2:], mode="trilinear") + if self.visual_output: + supervised_final = F.interpolate(supervised_final, size=x.size()[2:], mode="trilinear") + supervised_inner = [ + F.interpolate(o, size=x.size()[2:], mode="trilinear") + for o in supervised1 + supervised2 + supervised3 + ] + output = supervised_final, supervised_inner + else: + output = F.interpolate(supervised_final, size=x.size()[2:], mode="trilinear") return output diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py index d1c750e390..33ae4465ae 100644 --- a/monai/networks/nets/resnet.py +++ b/monai/networks/nets/resnet.py @@ -34,6 +34,9 @@ "resnet101", "resnet152", "resnet200", + "Daf3dResNetBottleneck", + "Daf3dResNetDilatedBottleneck", + "Daf3dResNet", ] @@ -154,6 +157,71 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return out +class Daf3dResNetBottleneck(ResNetBottleneck): + """ + ResNetBottleneck block as used in 'Deep Attentive 
Features for Prostate Segmentation in 3D +    Transrectal Ultrasound' . +    Group Norm is used instead of Batch Norm, and PReLU activation is used instead of ReLU. +    The initial expansion is 2 instead of 4, and the second convolution uses groups. + +    Args: +        in_planes: number of input channels. +        planes: number of output channels (taking expansion into account). +        spatial_dims: number of spatial dimensions of the input image. +        stride: stride to use for second conv layer. +        downsample: which downsample layer to use. +    """ + +    expansion = 2 + +    def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): +        norm_type: Callable = Norm[Norm.GROUP, spatial_dims] +        conv_type: Callable = Conv[Conv.CONV, spatial_dims] + +        # in case downsample uses batch norm, change to group norm +        if isinstance(downsample, nn.Sequential): +            downsample = nn.Sequential( +                conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False), +                norm_type(num_groups=32, num_channels=planes * self.expansion), +            ) + +        super().__init__(in_planes, planes, spatial_dims, stride, downsample) + +        # change norm from batch to group norm +        self.bn1 = norm_type(num_groups=32, num_channels=planes) +        self.bn2 = norm_type(num_groups=32, num_channels=planes) +        self.bn3 = norm_type(num_groups=32, num_channels=planes * self.expansion) + +        # adapt second convolution to work with groups +        self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False) + +        # adapt activation function +        self.relu = nn.PReLU() + + +class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck): +    """ +    ResNetDilatedBottleneck as used in 'Deep Attentive Features for Prostate Segmentation in 3D +    Transrectal Ultrasound' . +    Same as Daf3dResNetBottleneck, but a dilation of 2 is used in the second convolution. +    Args: +        in_planes: number of input channels. +        planes: number of output channels (taking expansion into account). +        spatial_dims: number of spatial dimensions of the input image. +        stride: stride to use for second conv layer. +        downsample: which downsample layer to use.
+ """ + + def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): + super().__init__(in_planes, planes, spatial_dims, stride, downsample) + + # add dilation in second convolution + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + self.conv2 = conv_type( + planes, planes, kernel_size=3, stride=stride, padding=2, dilation=2, groups=32, bias=False + ) + + class ResNet(nn.Module): """ ResNet based on: `Deep Residual Learning for Image Recognition `_ @@ -429,65 +497,81 @@ def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> return _resnet("resnet200", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs) -class Daf3dResNetBottleneck(ResNetBottleneck): - expansion = 2 - - def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): - norm_type: Callable = Norm[Norm.GROUP, spatial_dims] - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - - # in case downsample uses batch norm, change to group norm - if isinstance(downsample, nn.Sequential): - downsample = nn.Sequential( - conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False), - norm_type(num_groups=32, num_channels=planes * self.expansion), - ) - - super().__init__(in_planes, planes, 3, stride, downsample) - - # change norm from batch to group norm - self.bn1 = norm_type(num_groups=32, num_channels=planes) - self.bn2 = norm_type(num_groups=32, num_channels=planes) - self.bn3 = norm_type(num_groups=32, num_channels=planes * self.expansion) - - # adapt one convolution which is implemented differently - self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False) - - # adapt activation function - self.relu = nn.PReLU() +class Daf3dResNet(ResNet): + """ + ResNet as used in 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' + . + Uses two Daf3dResNetBottleneck blocks followed by two Daf3dResNetDilatedBottleneck blocks. + Args: + layers: how many layers to use. + block_inplanes: determine the size of planes at each step. Also tunable with widen_factor. + spatial_dims: number of spatial dimensions of the input image. + n_input_channels: number of input channels for first convolutional layer. + conv1_t_size: size of first convolution layer, determines kernel and padding. + conv1_t_stride: stride of first convolution layer. + no_max_pool: bool argument to determine if to use maxpool layer. + shortcut_type: which downsample block to use. Options are 'A', 'B', default to 'B'. + - 'A': using `self._downsample_basic_block`. + - 'B': kernel_size 1 conv + norm. + widen_factor: widen output for each layer. + num_classes: number of output (classifications). + feed_forward: whether to add the FC layer for the output, default to `True`. + bias_downsample: whether to use bias term in the downsampling block when `shortcut_type` is 'B', default to `True`. 
-class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck): - def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): - super().__init__(in_planes, planes, spatial_dims, stride, downsample) + """ - # add dilation in second convolution - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - self.conv2 = conv_type( - planes, planes, kernel_size=3, stride=stride, padding=2, dilation=2, groups=32, bias=False + def __init__( + self, + layers: list[int], + block_inplanes: list[int], + spatial_dims: int = 3, + n_input_channels: int = 3, + conv1_t_size: tuple[int] | int = 7, + conv1_t_stride: tuple[int] | int = 1, + no_max_pool: bool = False, + shortcut_type: str = "B", + widen_factor: float = 1.0, + num_classes: int = 400, + feed_forward: bool = True, + bias_downsample: bool = True, # for backwards compatibility (also see PR #5477) + ): + super().__init__( + ResNetBottleneck, + layers, + block_inplanes, + spatial_dims, + n_input_channels, + conv1_t_size, + conv1_t_stride, + no_max_pool, + shortcut_type, + widen_factor, + num_classes, + feed_forward, + bias_downsample, ) - -class Daf3dResNet(ResNet): - def __init__(self, block, layers, block_inplanes, n_input_channels, shortcut_type="B"): - super().__init__(block, layers, block_inplanes, n_input_channels=n_input_channels, num_classes=2, shortcut_type=shortcut_type) - self.in_planes = 64 - # adapt first convolution - conv_type: Callable = Conv[Conv.CONV, 3] - norm_type: Callable = Norm[Norm.GROUP, 3] + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.GROUP, spatial_dims] - self.conv1 = conv_type(n_input_channels, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False) + # adapt first convolution to work with new in_planes + self.conv1 = conv_type( + n_input_channels, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False + ) self.bn1 = norm_type(32, 64) self.relu = nn.PReLU() # adapt layers to our needs - self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], 3, shortcut_type) - self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], 3, shortcut_type, stride=(1, 2, 2)) + self.layer1 = self._make_layer(Daf3dResNetBottleneck, block_inplanes[0], layers[0], spatial_dims, shortcut_type) + self.layer2 = self._make_layer( + Daf3dResNetBottleneck, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=(1, 2, 2) + ) self.layer3 = self._make_layer( - Daf3dResNetDilatedBottleneck, block_inplanes[2], layers[2], 3, shortcut_type, stride=1 + Daf3dResNetDilatedBottleneck, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=1 ) self.layer4 = self._make_layer( - Daf3dResNetDilatedBottleneck, block_inplanes[3], layers[3], 3, shortcut_type, stride=1 + Daf3dResNetDilatedBottleneck, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=1 ) diff --git a/tests/test_daf3d.py b/tests/test_daf3d.py index 14e2fdcf31..ccf629801f 100644 --- a/tests/test_daf3d.py +++ b/tests/test_daf3d.py @@ -23,9 +23,17 @@ TEST_CASES = [ [{"in_channels": 1, "out_channels": 1}, (1, 1, 32, 32, 64), (1, 1, 32, 32, 64)], # single channel 3D, batch 1 [{"in_channels": 2, "out_channels": 1}, (3, 2, 32, 64, 128), (3, 1, 32, 64, 128)], # two channel 3D, batch 3 - [{"in_channels": 2, "out_channels": 2}, (3, 2, 32, 64, 128), (3, 2, 32, 64, 128)], # two channel 3D, same in & out channels + [ + {"in_channels": 2, "out_channels": 2}, + (3, 2, 32, 64, 128), + (3, 2, 32, 64, 128), + ], # two channel 3D, same in & out channels 
[{"in_channels": 4, "out_channels": 1}, (5, 4, 35, 35, 35), (5, 1, 35, 35, 35)], # four channel 3D, batch 5 - [{"in_channels": 4, "out_channels": 4}, (5, 4, 35, 35, 35), (5, 4, 35, 35, 35)] # four channel 3D, same in & out channels + [ + {"in_channels": 4, "out_channels": 4}, + (5, 4, 35, 35, 35), + (5, 4, 35, 35, 35), + ], # four channel 3D, same in & out channels ] From 2ad7e274960855009e01fdc044f1f9091d166019 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Mon, 20 Feb 2023 13:31:20 +0100 Subject: [PATCH 10/68] fixed return in training mode --- monai/networks/nets/daf3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index 06dcb48d87..d51ede626e 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -208,7 +208,7 @@ def forward(self, x): supervised_final = self.predict2(aspp) if self.training: - output = *supervised1, *supervised2, supervised_final + output = supervised1 + supervised2 + [supervised_final] output = [F.interpolate(o, size=x.size()[2:], mode="trilinear") for o in output] else: if self.visual_output: From 01ad50faf6b810424d3d5e9894130ad76c8d76c1 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Fri, 24 Feb 2023 17:47:16 +0100 Subject: [PATCH 11/68] included convConcatDenseBlock, Encoder, Decoder, bottleneck Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/blocks/denseblock.py | 179 +++++++++++++++++++++++++++- 1 file changed, 177 insertions(+), 2 deletions(-) diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index afd3183581..7d8c2f93ce 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -11,7 +11,7 @@ from __future__ import annotations -from collections.abc import Sequence +from typing import Sequence import torch import torch.nn as nn @@ -19,7 +19,7 @@ from monai.networks.blocks import Convolution, ResidualUnit from monai.networks.layers.factories import Act, Norm -__ALL__ = ["DenseBlock", "ConvDenseBlock"] +__ALL__ = ["DenseBlock", "ConvDenseBlock", "ConvConcatDenseBlock", "Bottleneck", "Encoder", "Decoder"] class DenseBlock(nn.Sequential): @@ -129,3 +129,178 @@ def _get_layer(self, in_channels, out_channels, dilation): dilation=dilation, bias=self.bias, ) + + +class ConvConcatDenseBlock(ConvDenseBlock): + """ + This dense block is defined as a sequence of 'Convolution' blocks. It overwrite the '_get_layer' methodto change the ordering of + Every convolutional layer is preceded by a batch-normalization layer and a Rectifier Linear Unit (ReLU) layer. + The first two convolutional layers are followed by a concatenation layer that concatenates + the input feature map with outputs of the current and previous convolutional blocks. + Kernel size of two convolutional layers kept small to limit number of paramters. + Appropriate padding is provided so that the size of feature maps before and after convolution remains constant. + The output channels for each convolution layer is set to 64, which acts as a bottle- neck for feature map selectivity. + The input channel size is variable, depending on the number of dense connections. + The third convolutional layer is also preceded by a batch normalization and ReLU, + but has a 1 * 1 kernel size to compress the feature map size to 64. 
+    Args: +        in_channels: number of input channels; variable depending on the depth of the network. +        se_layer: Squeeze and Excite block to be included, defaults to None (valid Quicknat se_block_type options are {'NONE', 'CSE', 'SSE', 'CSSE'}). +        dropout_layer: Dropout block to be included, defaults to None. +    :return: forward passed tensor and ``None`` (no indices) +    """ + +    def __init__( +        self, +        in_channels: int, +        se_layer: nn.Module | None = None, +        dropout_layer: type[nn.Dropout2d] | None = None, +        kernel_size: Sequence[int] | int = 5, +        num_filters: int = 64, +    ): +        self.count = 0 +        super().__init__( +            in_channels=in_channels, +            spatial_dims=2, +            # number of channels stays constant throughout the convolution layers +            channels=[num_filters, num_filters, num_filters], +            norm=("instance", {"num_features": in_channels}), +            kernel_size=kernel_size, +        ) +        self.se_layer = se_layer +        self.dropout_layer = dropout_layer + +    def _get_layer(self, in_channels, out_channels, dilation): +        """ +        After every convolutional layer the output is concatenated with the input and the layer before. +        The concatenated output is used as input to the next convolutional layer. + +        Args: +            in_channels: number of input channels. +            out_channels: number of output channels. +            dilation: dilation of the convolution. +        """ +        kernelsize = self.kernel_size if self.count < 2 else (1, 1) +        # padding = None if self.count < 2 else (0, 0) +        self.count += 1 +        conv = Convolution( +            spatial_dims=self.spatial_dims, +            in_channels=in_channels, +            out_channels=out_channels, +            strides=1, +            kernel_size=kernelsize, +            act=self.act, +            norm=("instance", {"num_features": in_channels}), +        ) +        return nn.Sequential(conv.get_submodule("adn"), conv.get_submodule("conv")) + +    def forward(self, input, _): +        i = 0 +        result = input +        for l in self.children(): +            # ignoring the max (un-)pool and dropout already added in the initial initialization step +            if isinstance(l, nn.MaxPool2d) or isinstance(l, nn.MaxUnpool2d) or isinstance(l, nn.Dropout2d): +                continue +            # first convolutional forward +            result = l(result) +            if i == 0: +                result1 = result +                # concatenation with the input feature map +                result = torch.cat((input, result), dim=1) + +            if i == 1: +                # concatenation with input feature map and feature map from first convolution +                result = torch.cat((result1, result, input), dim=1) +            i = i + 1 + +        # if SELayer or Dropout layer defined put output through layer before returning +        if self.se_layer is not None: +            result = self.se_layer(result) +        if self.dropout_layer is not None: +            result = self.dropout_layer(result) + +        return result, None + + +class Encoder(ConvConcatDenseBlock): +    """ +    Returns a convolution dense block for the encoding (down) part of a layer of the network. +    This Encoder block downpools the data with max_pool. +    Its output is used as input to the next layer down. +    New feature: it returns the indices of the max_pool to the decoder (up) path +    at the same layer to upsample the input. + +    Args: +        in_channels: number of input channels. +        max_pool: predefined max_pool layer to downsample the data. +        se_layer: Squeeze and Excite block to be included, defaults to None. +        dropout: Dropout block to be included, defaults to None. +        kernel_size: kernel size of the convolutional layers. Defaults to 5*5. +        num_filters: number of input channels to each convolution block.
Defaults to 64. +    """ + +    def __init__(self, in_channels: int, max_pool, se_layer, dropout, kernel_size, num_filters): +        super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) +        self.max_pool = max_pool + +    def forward(self, input, indices=None): +        input, indices = self.max_pool(input) + +        out_block, _ = super().forward(input, None) +        # save the indices for unpooling on the decoder side +        return out_block, indices + + +class Decoder(ConvConcatDenseBlock): +    """ +    Returns a convolution dense block for the decoding (up) part of a layer of the network. +    This will upsample the data with an unpool block after the convolutional forward pass. +    It uses the indices from the corresponding encoder on its level. +    Its output is used as input to the next layer up. + +    Args: +        in_channels: number of input channels. +        un_pool: predefined unpool block. +        se_layer: predefined SELayer. Defaults to None. +        dropout: predefined dropout block. Defaults to None. +        kernel_size: Kernel size of convolution layers. Defaults to 5*5. +        num_filters: number of input channels to each convolution layer. Defaults to 64. +    """ + +    def __init__(self, in_channels: int, un_pool, se_layer, dropout, kernel_size, num_filters): +        super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) +        self.un_pool = un_pool + +    def forward(self, input, indices): +        out_block, _ = super().forward(input, None) +        out_block = self.un_pool(out_block, indices) +        return out_block, None + + +class Bottleneck(ConvConcatDenseBlock): +    """ +    Returns the bottleneck layer at the bottom of the network, linking the encoder and decoder halves. +    It consists of a 5 * 5 convolutional layer and a batch normalization layer to separate +    the encoder and decoder part of the network, restricting information flow between the encoder and decoder. + +    Args: +        in_channels: number of input channels. +        se_layer: predefined SELayer. Defaults to None. +        dropout: predefined dropout block. Defaults to None. +        un_pool: predefined unpool block. +        max_pool: predefined maxpool block. +        kernel_size: Kernel size of convolution layers. Defaults to 5*5. +        num_filters: number of input channels to each convolution layer. Defaults to 64.
+ """ + + def __init__(self, in_channels: int, se_layer, dropout, max_pool, un_pool, kernel_size, num_filters): + super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) + self.max_pool = max_pool + self.un_pool = un_pool + + def forward(self, input, indices): + out_block, indices = self.max_pool(input) + out_block, _ = super().forward(out_block, None) + out_block = self.un_pool(out_block, indices) + return out_block, None From d1448c5d7a739336d215776034c705f46385d979 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Fri, 24 Feb 2023 17:47:41 +0100 Subject: [PATCH 12/68] added classifier block Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/blocks/convolutions.py | 35 +++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/monai/networks/blocks/convolutions.py b/monai/networks/blocks/convolutions.py index 8b18614364..c64734159f 100644 --- a/monai/networks/blocks/convolutions.py +++ b/monai/networks/blocks/convolutions.py @@ -16,6 +16,7 @@ import numpy as np import torch import torch.nn as nn +import torch.nn.functional as F from monai.networks.blocks import ADN from monai.networks.layers.convutils import same_padding, stride_minus_kernel_padding @@ -316,3 +317,37 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: res: torch.Tensor = self.residual(x) # create the additive residual from x cx: torch.Tensor = self.conv(x) # apply x to sequence of operations return cx + res # add the residual to the output + + +class ClassifierBlock(Convolution): + """ + Returns a classifier block without an activation function at the top. + It consists of a 1 * 1 convolutional layer which maps the input to a num_class channel feature map. + The output is a probability map for each of the classes. + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of classes to map to. + strides: convolution stride. Defaults to 1. + kernel_size: convolution kernel size. Defaults to 3. + adn_ordering: a string representing the ordering of activation, normalization, and dropout. + Defaults to "NDA". + act: activation type and arguments. Defaults to PReLU. + + """ + + def __init__(self, spatial_dims, in_channels, out_channels, strides, kernel_size, act=None, adn_ordering="A"): + super().__init__(spatial_dims, in_channels, out_channels, strides, kernel_size, adn_ordering, act) + + def forward(self, input: torch.Tensor, weights=None, indices=None): + _, channel, _, _ = input.size() + if weights is not None: + weights, _ = torch.max(weights, dim=0) + weights = weights.view(1, channel, 1, 1) + # use weights to adapt how the classes are weighted. 
+            out_conv = F.conv2d(input, weights) +        else: +            out_conv = super().forward(input) +        # no indices to return +        return out_conv, None  From 75ac8899464e4863c9cc4e0da507d75c7b2bced8 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Fri, 24 Feb 2023 17:48:41 +0100 Subject: [PATCH 13/68] added skipwithidx  Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/layers/simplelayers.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-)  diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index a1122ceaa2..dbde996d66 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -13,7 +13,7 @@  import math from copy import deepcopy -from typing import Sequence +from typing import Sequence, Union  import torch import torch.nn.functional as F @@ -47,6 +47,7 @@     "Reshape",     "SavitzkyGolayFilter",     "SharpenFilter",     "SkipConnection", +    "SkipConnectionWithIdx",     "apply_filter",     "median_filter",     "separable_filtering", @@ -137,6 +138,29 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:         raise NotImplementedError(f"Unsupported mode {self.mode}.")   +class SkipConnectionWithIdx(SkipConnection): +    """ +    Combine the forward pass input with the result from the given submodule:: + +        --+--submodule--o-- +          |_____________| + +    The available modes are ``"cat"``, ``"add"``, ``"mul"``. +    Defaults to "cat" and dimension 1. +    Inherits from SkipConnection but provides the indices with each forward pass. + +    """ + +    def __init__(self, submodule, dim: int = 1, mode: Union[str, SkipMode] = "cat") -> None: +        super().__init__(submodule, dim=dim, mode=mode) + +    def forward(self, input, indices): +        y, _ = self.submodule(input, None) +        if self.mode == "cat": +            output = torch.cat((input, y), dim=1) +        return output, indices + + class Flatten(nn.Module):     """     Flattens the given input in the forward pass to be [B,-1] in shape.
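The Encoder, Decoder and ClassifierBlock introduced in the patches above all share a (tensor, indices) calling convention: the encoder keeps the indices produced by max pooling so that the decoder stage at the same depth can unpool with them. Below is a minimal sketch of that round trip, using only the torch pooling primitives these blocks wrap; the shape and channel count are illustrative assumptions, not values taken from the patches.

import torch
import torch.nn as nn

# the encoder-side pool returns indices, the decoder-side unpool consumes them
pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)

x = torch.randn(1, 64, 16, 16)  # hypothetical feature map
down, indices = pool(x)    # as in Encoder.forward, before the dense block
up = unpool(down, indices)  # as in Decoder.forward, after the dense block
assert up.shape == x.shape

This is why Encoder.forward returns (out_block, indices) while Decoder.forward consumes the indices and returns (out_block, None): each set of pooling indices is produced once on the way down and used exactly once on the way up.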
From da124bcc27c8d20977f166e5c16401de627bdd97 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Fri, 24 Feb 2023 17:49:49 +0100 Subject: [PATCH 14/68] added Bottleneck, ConvConcatDenseBloc, Decoder, Encoder to init Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/blocks/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/blocks/__init__.py b/monai/networks/blocks/__init__.py index e67cb3376f..dca14c2d1c 100644 --- a/monai/networks/blocks/__init__.py +++ b/monai/networks/blocks/__init__.py @@ -17,7 +17,7 @@ from .backbone_fpn_utils import BackboneWithFPN from .convolutions import Convolution, ResidualUnit from .crf import CRF -from .denseblock import ConvDenseBlock, DenseBlock +from .denseblock import Bottleneck, ConvConcatDenseBlock, ConvDenseBlock, Decoder, DenseBlock, Encoder from .dints_block import ActiConvNormBlock, FactorizedIncreaseBlock, FactorizedReduceBlock, P3DActiConvNormBlock from .downsample import MaxAvgPool from .dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, UnetUpBlock, get_output_padding, get_padding From 33e15f2059c69b1edf07589bb7d03fdb077826f7 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Fri, 24 Feb 2023 17:50:36 +0100 Subject: [PATCH 15/68] SkipConnectionWithIdx to init Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/layers/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monai/networks/layers/__init__.py b/monai/networks/layers/__init__.py index d61ed57f7f..60ca213c25 100644 --- a/monai/networks/layers/__init__.py +++ b/monai/networks/layers/__init__.py @@ -31,6 +31,7 @@ SavitzkyGolayFilter, SharpenFilter, SkipConnection, + SkipConnectionWithIdx, apply_filter, median_filter, separable_filtering, From c7929b0bbdbcd01733520e26fd8b8c8480d7248a Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Fri, 24 Feb 2023 17:51:24 +0100 Subject: [PATCH 16/68] from .quicknat import Quicknat to init Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monai/networks/nets/__init__.py b/monai/networks/nets/__init__.py index 95ddad7842..84300a2b4d 100644 --- a/monai/networks/nets/__init__.py +++ b/monai/networks/nets/__init__.py @@ -51,6 +51,7 @@ from .hovernet import Hovernet, HoVernet, HoVerNet, HoverNet from .milmodel import MILModel from .netadapter import NetAdapter +from .quicknat import Quicknat from .regressor import Regressor from .regunet import GlobalNet, LocalNet, RegUNet from .resnet import ( From 9f3aba6ccd6ee85fc86c9cb0b7892a70171bfbe7 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Fri, 24 Feb 2023 17:51:46 +0100 Subject: [PATCH 17/68] final changes Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 195ac96bf2..54ead14309 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -197,7 +197,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: input, _ = self.model(input, None) return input - +# Should go into a layers file but not clear which exact one. class SequentialWithIdx(nn.Sequential): """ A sequential container. 
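SequentialWithIdx, added in the patch above, exists so that a chain of such (tensor, indices) modules can be composed like a plain nn.Sequential; its body is not shown in this excerpt. A plausible minimal implementation, given here only as a hedged sketch of the calling convention (the class name is hypothetical), threads both values through every child:

import torch.nn as nn

class SequentialWithIdxSketch(nn.Sequential):
    # hypothetical re-implementation for illustration, not the committed code
    def forward(self, input, indices):
        for module in self:
            # every child follows the (tensor, indices) convention
            input, indices = module(input, indices)
        return input, indices

With such a container, SkipConnectionWithIdx, the dense blocks and the ClassifierBlock can all live inside one module, which is how the Quicknat forward pass stays a single call: input, _ = self.model(input, None).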
From 0d2782de3228e6e6534be7a2840ed116c436c2f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CVanessa?= <“vanessa.gonzalezduque@ls2n.fr”> Date: Fri, 24 Feb 2023 18:31:12 +0100 Subject: [PATCH 18/68] ClassifierBlock imported from blocks.convolutions  --- monai/networks/nets/quicknat.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)  diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 54ead14309..32a53a4037 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -17,7 +17,8 @@  import torch.nn as nn from squeeze_and_excitation import squeeze_and_excitation as se1  -from monai.networks.blocks import Bottleneck, ClassifierBlock, ConvConcatDenseBlock, Decoder, Encoder +from monai.networks.blocks import Bottleneck, ConvConcatDenseBlock, Decoder, Encoder +from monai.networks.blocks.convolutions import ClassifierBlock from monai.networks.blocks import squeeze_and_excitation as se from monai.networks.layers.factories import Act, Norm from monai.networks.layers.simplelayers import SkipConnectionWithIdx @@ -197,7 +197,9 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:         input, _ = self.model(input, None)         return input  -# Should go into a layers file but not clear which exact one. +# Should go into a layers file but not clear which exact one. + + class SequentialWithIdx(nn.Sequential):     """     A sequential container.  From 73688eb29a307f591f58049b3fb966f3252810fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CVanessa?= <“vanessa.gonzalezduque@ls2n.fr”> Date: Fri, 24 Feb 2023 18:57:19 +0100 Subject: [PATCH 19/68] Black reformatting of Quicknat  --- monai/networks/blocks/denseblock.py | 4 ++-- monai/networks/nets/quicknat.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-)  diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index 7d8c2f93ce..13b365d566 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -172,7 +172,7 @@ def _get_layer(self, in_channels, out_channels, dilation):         """ -        After every convolutional layer the output is concatenated with the input and the layer before. +        After every convolutional layer the output is concatenated with the input and the layer before.         The concatenated output is used as input to the next convolutional layer.  @@ -228,7 +228,7 @@ class Encoder(ConvConcatDenseBlock):     Returns a convolution dense block for the encoding (down) part of a layer of the network.     This Encoder block downpools the data with max_pool.     Its output is used as input to the next layer down. -    New feature: it returns the indices of the max_pool to the decoder (up) path +    New feature: it returns the indices of the max_pool to the decoder (up) path     at the same layer to upsample the input.
Args: diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 32a53a4037..f54c6d1ef0 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -18,8 +18,8 @@ from squeeze_and_excitation import squeeze_and_excitation as se1 from monai.networks.blocks import Bottleneck, ConvConcatDenseBlock, Decoder, Encoder -from monai.networks.blocks.convolutions import ClassifierBlock from monai.networks.blocks import squeeze_and_excitation as se +from monai.networks.blocks.convolutions import ClassifierBlock from monai.networks.layers.factories import Act, Norm from monai.networks.layers.simplelayers import SkipConnectionWithIdx from monai.networks.layers.utils import get_dropout_layer, get_pool_layer @@ -198,6 +198,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: input, _ = self.model(input, None) return input + # Should go into a layers file but not clear which exact one. From c55aca820a626700740a2c8ac35bc8f7464d119f Mon Sep 17 00:00:00 2001 From: Vanessa Date: Wed, 5 Apr 2023 18:28:53 +0200 Subject: [PATCH 20/68] Update with dev branch --- .gitignore | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitignore b/.gitignore index bd117cc321..13b386b4fe 100644 --- a/.gitignore +++ b/.gitignore @@ -147,3 +147,9 @@ tests/testing_data/CT_2D_head_moving.mha # profiling results *.prof + +tests/testing_data/MNI152_T1_2mm.nii.gz + +*.gz + +*.pth From 734bc19ee20ff6a7f89f56158bd2cc28ad704722 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CVanessa?= <“vanessa.gonzalezduque@ls2n.fr”> Date: Thu, 6 Apr 2023 15:55:07 +0200 Subject: [PATCH 21/68] Squeeze import using monai.utils.optional_import --- monai/networks/nets/quicknat.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index f54c6d1ef0..1300e6d7ca 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -15,7 +15,6 @@ import torch import torch.nn as nn -from squeeze_and_excitation import squeeze_and_excitation as se1 from monai.networks.blocks import Bottleneck, ConvConcatDenseBlock, Decoder, Encoder from monai.networks.blocks import squeeze_and_excitation as se @@ -23,7 +22,7 @@ from monai.networks.layers.factories import Act, Norm from monai.networks.layers.simplelayers import SkipConnectionWithIdx from monai.networks.layers.utils import get_dropout_layer, get_pool_layer -from monai.utils import export +from monai.utils import export, optional_import __all__ = ["Quicknat"] @@ -167,6 +166,11 @@ def get_selayer(self, n_filters, se_block_type="None"): return se.ChannelSELayer(2, n_filters) # not implemented in squeeze_and_excitation in monai elif se_block_type == "SSE": + # Lazi import to avoid dependency + se1, flag = optional_import("squeeze_and_excitation") + # Throw error if squeeze_and_excitation is not installed + if not flag: + raise ImportError("Please install squeeze_and_excitation locally to use SpatialSELayer") return se1.SpatialSELayer(n_filters) elif se_block_type == "CSSE": From b5c27edc61165c451c4f642d349894a1f5b42bf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CVanessa?= <“vanessa.gonzalezduque@ls2n.fr”> Date: Thu, 6 Apr 2023 15:55:37 +0200 Subject: [PATCH 22/68] Ortography error --- monai/networks/nets/quicknat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 1300e6d7ca..bb4820f49a 100644 --- a/monai/networks/nets/quicknat.py +++ 
b/monai/networks/nets/quicknat.py @@ -166,7 +166,7 @@ def get_selayer(self, n_filters, se_block_type="None"): return se.ChannelSELayer(2, n_filters) # not implemented in squeeze_and_excitation in monai elif se_block_type == "SSE": - # Lazi import to avoid dependency + # Lazy import to avoid dependency se1, flag = optional_import("squeeze_and_excitation") # Throw error if squeeze_and_excitation is not installed if not flag: From bdc5b5266b2c0838eee66ba60cc20313abd50738 Mon Sep 17 00:00:00 2001 From: "vanessagd.2395" Date: Tue, 20 Jun 2023 14:30:39 +0200 Subject: [PATCH 23/68] Update .gitignore deleting absolut paths Signed-off-by: vanessagd.2395 --- .gitignore | 2 -- 1 file changed, 2 deletions(-) diff --git a/.gitignore b/.gitignore index 4ae6bbbab6..ee7ece6cf3 100644 --- a/.gitignore +++ b/.gitignore @@ -151,8 +151,6 @@ tests/testing_data/CT_2D_head_moving.mha *.prof runs -tests/testing_data/MNI152_T1_2mm.nii.gz - *.gz *.pth From 161161d58493a98c787211cc1b3e4bfd1919fb22 Mon Sep 17 00:00:00 2001 From: "vanessagd.2395" Date: Tue, 20 Jun 2023 14:32:14 +0200 Subject: [PATCH 24/68] Update monai/networks/blocks/backbone_fpn_utils.py Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: vanessagd.2395 --- monai/networks/blocks/backbone_fpn_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/blocks/backbone_fpn_utils.py b/monai/networks/blocks/backbone_fpn_utils.py index 19dc165a0a..04cf8a737b 100644 --- a/monai/networks/blocks/backbone_fpn_utils.py +++ b/monai/networks/blocks/backbone_fpn_utils.py @@ -214,6 +214,6 @@ def __init__( elif isinstance(backbone.conv1, nn.Conv3d): spatial_dims = 3 else: - raise ValueError("Could not find spatial_dims of backbone, please specify it.") + raise ValueError("Could not determine value of `spatial_dims` from backbone, please provide explicit value.") self.fpn = Daf3dFPN(spatial_dims, in_channels_list, out_channels, extra_blocks) From 65929b0e827c39ead822dcfa97b018cbfa2e1285 Mon Sep 17 00:00:00 2001 From: "vanessagd.2395" Date: Tue, 20 Jun 2023 14:38:36 +0200 Subject: [PATCH 25/68] Update convolutions.py Quicknat is not a 2D architecture Signed-off-by: vanessagd.2395 --- monai/networks/blocks/convolutions.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/monai/networks/blocks/convolutions.py b/monai/networks/blocks/convolutions.py index c64734159f..6975021b47 100644 --- a/monai/networks/blocks/convolutions.py +++ b/monai/networks/blocks/convolutions.py @@ -341,12 +341,15 @@ def __init__(self, spatial_dims, in_channels, out_channels, strides, kernel_size super().__init__(spatial_dims, in_channels, out_channels, strides, kernel_size, adn_ordering, act) def forward(self, input: torch.Tensor, weights=None, indices=None): - _, channel, _, _ = input.size() + _, channel, *dims = input.size() if weights is not None: weights, _ = torch.max(weights, dim=0) weights = weights.view(1, channel, 1, 1) # use weights to adapt how the classes are weighted. 
-            out_conv = F.conv2d(input, weights) +            if len(dims) == 2: +                out_conv = F.conv2d(input, weights) +            else: +                raise ValueError("Quicknat is a 2D architecture, please check your dimension.")         else:             out_conv = super().forward(input)         # no indices to return         return out_conv, None  From bc9dbc27b19d143344ffcd6ac89bfba2e606093e Mon Sep 17 00:00:00 2001 From: "vanessagd.2395" Date: Tue, 20 Jun 2023 14:42:42 +0200 Subject: [PATCH 26/68] Update monai/networks/blocks/denseblock.py  Accepted changes  Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: vanessagd.2395 --- monai/networks/blocks/denseblock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)  diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index 13b365d566..f17b16cafa 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -200,7 +200,7 @@ def forward(self, input, _):         result = input         for l in self.children():             # ignoring the max (un-)pool and dropout already added in the initial initialization step -            if isinstance(l, nn.MaxPool2d) or isinstance(l, nn.MaxUnpool2d) or isinstance(l, nn.Dropout2d): +            if isinstance(l, (nn.MaxPool2d, nn.MaxUnpool2d, nn.Dropout2d)):                 continue             # first convolutional forward             result = l(result)  From cc12a672cfd6bc67ef4b9eff9068b2ab46a49e29 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Tue, 20 Jun 2023 14:49:57 +0200 Subject: [PATCH 27/68] Updated the constructor of ConvConcatDenseBlock: the default value is now nn.Identity. This makes it possible to take out the None check in the forward method  Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/blocks/denseblock.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-)  diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index f17b16cafa..44dac97c46 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -153,8 +153,8 @@ class ConvConcatDenseBlock(ConvDenseBlock):     def __init__(         self,         in_channels: int, -        se_layer: nn.Module | None = None, -        dropout_layer: type[nn.Dropout2d] | None = None, +        se_layer: nn.Module | None = nn.Identity, +        dropout_layer: type[nn.Dropout2d] | None = nn.Identity,         kernel_size: Sequence[int] | int = 5,         num_filters: int = 64,     ): @@ -214,11 +214,9 @@ def forward(self, input, _):                 result = torch.cat((result1, result, input), dim=1)             i = i + 1  -        # if SELayer or Dropout layer defined put output through layer before returning -        if self.se_layer is not None: -            result = self.se_layer(result) -        if self.dropout_layer is not None: -            result = self.dropout_layer(result) +        # if SELayer or Dropout layer defined put output through layer before returning, else it just goes through nn.Identity and the output does not change +        result = self.se_layer(result) +        result = self.dropout_layer(result)          return result, None  From 082f25d3b36a573d115d20bbd089cc6f792d6f0d Mon Sep 17 00:00:00 2001 From: "vanessagd.2395" Date: Tue, 20 Jun 2023 14:53:55 +0200 Subject: [PATCH 28/68] Update monai/networks/layers/simplelayers.py  Co-authored-by: Eric Kerfoot <17726042+ericspod@users.noreply.github.com> Signed-off-by: vanessagd.2395 --- monai/networks/layers/simplelayers.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-)  diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index dbde996d66..4a0ebfecad 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -155,10
+155,7 @@ def __init__(self, submodule, dim: int = 1, mode: Union[str, SkipMode] = "cat") super().__init__(submodule, dim=dim, mode=mode) def forward(self, input, indices): - y, _ = self.submodule(input, None) - if self.mode == "cat": - output = torch.cat((input, y), dim=1) - return output, indices + return super().forward(input), indices class Flatten(nn.Module): From 64be0586cac607489860bd76f7491d98a2fd171e Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:04:13 +0200 Subject: [PATCH 29/68] Update import squeeze_and_excitation Do not know what is meant by putting it in the top with the other methods Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index bb4820f49a..39d51bbbe6 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -164,18 +164,17 @@ def get_selayer(self, n_filters, se_block_type="None"): """ if se_block_type == "CSE": return se.ChannelSELayer(2, n_filters) - # not implemented in squeeze_and_excitation in monai - elif se_block_type == "SSE": + # not implemented in squeeze_and_excitation in monai use other squeeze_and_excitation import: + elif se_block_type == "SSE" or se_block_type == "CSSE": # Lazy import to avoid dependency se1, flag = optional_import("squeeze_and_excitation") # Throw error if squeeze_and_excitation is not installed if not flag: raise ImportError("Please install squeeze_and_excitation locally to use SpatialSELayer") - return se1.SpatialSELayer(n_filters) - - elif se_block_type == "CSSE": - # not implemented in monai - return se1.ChannelSpatialSELayer(n_filters) + if se_block_type == "SSE": + return se1.SpatialSELayer(n_filters) + else: + return se1.ChannelSpatialSELayer(n_filters) else: return None From 0eb7ee7a0f98ce01830a853067f99cfc72aace49 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 13:04:38 +0000 Subject: [PATCH 30/68] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/networks/nets/quicknat.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 39d51bbbe6..1cead5a4db 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -164,16 +164,16 @@ def get_selayer(self, n_filters, se_block_type="None"): """ if se_block_type == "CSE": return se.ChannelSELayer(2, n_filters) - # not implemented in squeeze_and_excitation in monai use other squeeze_and_excitation import: + # not implemented in squeeze_and_excitation in monai use other squeeze_and_excitation import: elif se_block_type == "SSE" or se_block_type == "CSSE": # Lazy import to avoid dependency se1, flag = optional_import("squeeze_and_excitation") # Throw error if squeeze_and_excitation is not installed if not flag: raise ImportError("Please install squeeze_and_excitation locally to use SpatialSELayer") - if se_block_type == "SSE": + if se_block_type == "SSE": return se1.SpatialSELayer(n_filters) - else: + else: return se1.ChannelSpatialSELayer(n_filters) else: return None From 54c9683ebab2e5ac1caba8fb9d80644aea867d53 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Tue, 20 Jun 2023 15:08:26 +0200 Subject: [PATCH 31/68] rearranged daf3d blocks 
into daf3d main file --- monai/networks/blocks/aspp.py | 76 ---- monai/networks/blocks/backbone_fpn_utils.py | 48 +-- .../blocks/feature_pyramid_network.py | 71 +--- monai/networks/nets/daf3d.py | 361 +++++++++++++++++- monai/networks/nets/resnet.py | 148 ------- 5 files changed, 359 insertions(+), 345 deletions(-) diff --git a/monai/networks/blocks/aspp.py b/monai/networks/blocks/aspp.py index dad08cb75e..1f6c76c3af 100644 --- a/monai/networks/blocks/aspp.py +++ b/monai/networks/blocks/aspp.py @@ -16,7 +16,6 @@ import torch import torch.nn as nn -from monai.networks.blocks import ADN from monai.networks.blocks.convolutions import Convolution from monai.networks.layers import same_padding from monai.networks.layers.factories import Conv @@ -106,78 +105,3 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x_out = torch.cat([conv(x) for conv in self.convs], dim=1) x_out = self.conv_k1(x_out) return x_out - - -class Daf3dASPP(SimpleASPP): - """ - Atrous Spatial Pyramid Pooling module as used in 'Deep Attentive Features for Prostate Segmentation in - 3D Transrectal Ultrasound' . Core functionality as in SimpleASPP, but after each - layerwise convolution a group normalization is added. Further weight initialization for convolutions is provided in - _init_weight(). Additional possibility to specify the number of final output channels. - - Args: - spatial_dims: number of spatial dimensions, could be 1, 2, or 3. - in_channels: number of input channels. - conv_out_channels: number of output channels of each atrous conv. - out_channels: number of output channels of final convolution. - If None, uses len(kernel_sizes) * conv_out_channels - kernel_sizes: a sequence of four convolutional kernel sizes. - Defaults to (1, 3, 3, 3) for four (dilated) convolutions. - dilations: a sequence of four convolutional dilation parameters. - Defaults to (1, 2, 4, 6) for four (dilated) convolutions. - norm_type: final kernel-size-one convolution normalization type. - Defaults to batch norm. - acti_type: final kernel-size-one convolution activation type. - Defaults to leaky ReLU. - bias: whether to have a bias term in convolution blocks. Defaults to False. - According to `Performance Tuning Guide `_, - if a conv layer is directly followed by a batch norm layer, bias should be False. - - Raises: - ValueError: When ``kernel_sizes`` length differs from ``dilations``. 
- """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - conv_out_channels: int, - out_channels: int | None = None, - kernel_sizes: Sequence[int] = (1, 3, 3, 3), - dilations: Sequence[int] = (1, 2, 4, 6), - norm_type: tuple | str | None = "BATCH", - acti_type: tuple | str | None = "LEAKYRELU", - bias: bool = False, - ) -> None: - super().__init__( - spatial_dims, in_channels, conv_out_channels, kernel_sizes, dilations, norm_type, acti_type, bias - ) - - # add normalization after each atrous convolution, initializes weights - new_convs = nn.ModuleList() - for _conv in self.convs: - tmp_conv = Convolution(1, 1, 1) - tmp_conv.conv = _conv - tmp_conv.adn = ADN(ordering="N", norm=norm_type, norm_dim=1) - tmp_conv = self._init_weight(tmp_conv) - new_convs.append(tmp_conv) - self.convs = new_convs - - # change final convolution to different out_channels - if out_channels is None: - out_channels = len(kernel_sizes) * conv_out_channels - - self.conv_k1 = Convolution( - spatial_dims=3, - in_channels=len(kernel_sizes) * conv_out_channels, - out_channels=out_channels, - kernel_size=1, - norm=norm_type, - act=acti_type, - ) - - def _init_weight(self, conv): - for m in conv.modules(): - if isinstance(m, nn.Conv3d): # true for conv.conv - torch.nn.init.kaiming_normal_(m.weight) - return conv diff --git a/monai/networks/blocks/backbone_fpn_utils.py b/monai/networks/blocks/backbone_fpn_utils.py index 04cf8a737b..824b31a83b 100644 --- a/monai/networks/blocks/backbone_fpn_utils.py +++ b/monai/networks/blocks/backbone_fpn_utils.py @@ -57,11 +57,11 @@ from monai.networks.nets import resnet from monai.utils import optional_import -from .feature_pyramid_network import Daf3dFPN, ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool +from .feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool torchvision_models, _ = optional_import("torchvision.models") -__all__ = ["BackboneWithFPN", "Daf3dBackboneWithFPN"] +__all__ = ["BackboneWithFPN"] class BackboneWithFPN(nn.Module): @@ -173,47 +173,3 @@ def _resnet_fpn_extractor( return BackboneWithFPN( backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks, spatial_dims=spatial_dims ) - - -class Daf3dBackboneWithFPN(BackboneWithFPN): - """ - Same as BackboneWithFPN but uses custom Daf3DFPN as feature pyramid network - - Args: - backbone: backbone network - return_layers: a dict containing the names - of the modules for which the activations will be returned as - the key of the dict, and the value of the dict is the name - of the returned activation (which the user can specify). - in_channels_list: number of channels for each feature map - that is returned, in the order they are present in the OrderedDict - out_channels: number of channels in the FPN. - spatial_dims: 2D or 3D images - extra_blocks: if provided, extra operations will - be performed. 
It is expected to take the fpn features, the original - features and the names of the original features as input, and returns - a new list of feature maps and their corresponding names - """ - - def __init__( - self, - backbone: nn.Module, - return_layers: dict[str, str], - in_channels_list: list[int], - out_channels: int, - spatial_dims: int | None = None, - extra_blocks: ExtraFPNBlock | None = None, - ) -> None: - super().__init__(backbone, return_layers, in_channels_list, out_channels, spatial_dims, extra_blocks) - - if spatial_dims is None: - if hasattr(backbone, "spatial_dims") and isinstance(backbone.spatial_dims, int): - spatial_dims = backbone.spatial_dims - elif isinstance(backbone.conv1, nn.Conv2d): - spatial_dims = 2 - elif isinstance(backbone.conv1, nn.Conv3d): - spatial_dims = 3 - else: - raise ValueError("Could not determine value of `spatial_dims` from backbone, please provide explicit value.") - - self.fpn = Daf3dFPN(spatial_dims, in_channels_list, out_channels, extra_blocks) diff --git a/monai/networks/blocks/feature_pyramid_network.py b/monai/networks/blocks/feature_pyramid_network.py index b0002971a2..cca7342078 100644 --- a/monai/networks/blocks/feature_pyramid_network.py +++ b/monai/networks/blocks/feature_pyramid_network.py @@ -58,10 +58,9 @@ import torch.nn.functional as F from torch import Tensor, nn -from monai.networks.blocks.convolutions import Convolution from monai.networks.layers.factories import Conv, Pool -__all__ = ["ExtraFPNBlock", "LastLevelMaxPool", "LastLevelP6P7", "FeaturePyramidNetwork", "Daf3dFPN"] +__all__ = ["ExtraFPNBlock", "LastLevelMaxPool", "LastLevelP6P7", "FeaturePyramidNetwork"] class ExtraFPNBlock(nn.Module): @@ -263,71 +262,3 @@ def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]: out = OrderedDict(list(zip(names, results))) return out - - -class Daf3dFPN(FeaturePyramidNetwork): - """ - Feature Pyramid Network as used in 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' - . - Omits 3x3x3 convolution of layer_blocks and interpolates resulting feature maps to be the same size as - feature map with highest resolution. - - Args: - spatial_dims: 2D or 3D images - in_channels_list: number of channels for each feature map that is passed to the module - out_channels: number of channels of the FPN representation - extra_blocks: if provided, extra operations will be performed. 
- It is expected to take the fpn features, the original - features and the names of the original features as input, and returns - a new list of feature maps and their corresponding names - """ - - def __init__( - self, - spatial_dims: int, - in_channels_list: list[int], - out_channels: int, - extra_blocks: ExtraFPNBlock | None = None, - ): - super().__init__(spatial_dims, in_channels_list, out_channels, extra_blocks) - - self.inner_blocks = nn.ModuleList() - for in_channels in in_channels_list: - if in_channels == 0: - raise ValueError("in_channels=0 is currently not supported") - inner_block_module = Convolution( - spatial_dims, - in_channels, - out_channels, - kernel_size=1, - adn_ordering="NA", - act="PRELU", - norm=("group", {"num_groups": 32, "num_channels": 128}), - ) - self.inner_blocks.append(inner_block_module) - - def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]: - # unpack OrderedDict into two lists for easier handling - names = list(x.keys()) - x_values: list[Tensor] = list(x.values()) - - last_inner = self.get_result_from_inner_blocks(x_values[-1], -1) - results = [] - results.append(last_inner) - - for idx in range(len(x_values) - 2, -1, -1): - inner_lateral = self.get_result_from_inner_blocks(x_values[idx], idx) - feat_shape = inner_lateral.shape[2:] - inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="trilinear") - last_inner = inner_lateral + inner_top_down - results.insert(0, last_inner) - - if self.extra_blocks is not None: - results, names = self.extra_blocks(results, x_values, names) - - # bring all layers to same size - results = [results[0]] + [F.interpolate(l, size=x["feat1"].size()[2:], mode="trilinear") for l in results[1:]] - # make it back an OrderedDict - out = OrderedDict(list(zip(names, results))) - - return out diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index d51ede626e..fb3de4b6c9 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -12,16 +12,33 @@ from __future__ import annotations +from collections import OrderedDict +from collections.abc import Callable, Sequence + import torch import torch.nn as nn import torch.nn.functional as F +from torch import Tensor -from monai.networks.blocks.aspp import Daf3dASPP -from monai.networks.blocks.backbone_fpn_utils import Daf3dBackboneWithFPN +from monai.networks.blocks import ADN +from monai.networks.blocks.aspp import SimpleASPP +from monai.networks.blocks.backbone_fpn_utils import BackboneWithFPN from monai.networks.blocks.convolutions import Convolution -from monai.networks.nets.resnet import Daf3dResNet - -__all__ = ["AttentionModule", "Daf3dBackbone", "DAF3D"] +from monai.networks.blocks.feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork +from monai.networks.layers.factories import Conv, Norm +from monai.networks.nets.resnet import ResNet, ResNetBottleneck + +__all__ = [ + "AttentionModule", + "Daf3dASPP", + "Daf3dResNetBottleneck", + "Daf3dResNetDilatedBottleneck", + "Daf3dResNet", + "Daf3dBackbone", + "Daf3dFPN", + "Daf3dBackboneWithFPN", + "DAF3D", +] class AttentionModule(nn.Module): @@ -66,6 +83,226 @@ def forward(self, slf, mlf): return (out, att) +class Daf3dASPP(SimpleASPP): + """ + Atrous Spatial Pyramid Pooling module as used in 'Deep Attentive Features for Prostate Segmentation in + 3D Transrectal Ultrasound' . Core functionality as in SimpleASPP, but after each + layerwise convolution a group normalization is added. Further weight initialization for convolutions is provided in + _init_weight(). 
The number of final output channels can additionally be specified.
+
+    Args:
+        spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
+        in_channels: number of input channels.
+        conv_out_channels: number of output channels of each atrous conv.
+        out_channels: number of output channels of final convolution.
+            If None, uses len(kernel_sizes) * conv_out_channels
+        kernel_sizes: a sequence of four convolutional kernel sizes.
+            Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
+        dilations: a sequence of four convolutional dilation parameters.
+            Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
+        norm_type: final kernel-size-one convolution normalization type.
+            Defaults to batch norm.
+        acti_type: final kernel-size-one convolution activation type.
+            Defaults to leaky ReLU.
+        bias: whether to have a bias term in convolution blocks. Defaults to False.
+            According to `Performance Tuning Guide `_,
+            if a conv layer is directly followed by a batch norm layer, bias should be False.
+
+    Raises:
+        ValueError: When ``kernel_sizes`` length differs from ``dilations``.
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        conv_out_channels: int,
+        out_channels: int | None = None,
+        kernel_sizes: Sequence[int] = (1, 3, 3, 3),
+        dilations: Sequence[int] = (1, 2, 4, 6),
+        norm_type: tuple | str | None = "BATCH",
+        acti_type: tuple | str | None = "LEAKYRELU",
+        bias: bool = False,
+    ) -> None:
+        super().__init__(
+            spatial_dims, in_channels, conv_out_channels, kernel_sizes, dilations, norm_type, acti_type, bias
+        )
+
+        # add normalization after each atrous convolution and initialize the weights
+        new_convs = nn.ModuleList()
+        for _conv in self.convs:
+            tmp_conv = Convolution(1, 1, 1)
+            tmp_conv.conv = _conv
+            tmp_conv.adn = ADN(ordering="N", norm=norm_type, norm_dim=1)
+            tmp_conv = self._init_weight(tmp_conv)
+            new_convs.append(tmp_conv)
+        self.convs = new_convs
+
+        # change final convolution to different out_channels
+        if out_channels is None:
+            out_channels = len(kernel_sizes) * conv_out_channels
+
+        self.conv_k1 = Convolution(
+            spatial_dims=3,
+            in_channels=len(kernel_sizes) * conv_out_channels,
+            out_channels=out_channels,
+            kernel_size=1,
+            norm=norm_type,
+            act=acti_type,
+        )
+
+    def _init_weight(self, conv):
+        for m in conv.modules():
+            if isinstance(m, nn.Conv3d):  # true for conv.conv
+                torch.nn.init.kaiming_normal_(m.weight)
+        return conv
+
+
+class Daf3dResNetBottleneck(ResNetBottleneck):
+    """
+    ResNetBottleneck block as used in 'Deep Attentive Features for Prostate Segmentation in 3D
+    Transrectal Ultrasound' .
+    Group Norm is used instead of Batch Norm, and PReLU activation is used instead of ReLU.
+    Initial expansion is 2 instead of 4 and the second convolution uses groups.
+
+    Args:
+        in_planes: number of input channels.
+        planes: number of output channels (taking expansion into account).
+        spatial_dims: number of spatial dimensions of the input image.
+        stride: stride to use for second conv layer.
+        downsample: which downsample layer to use.
+ """ + + expansion = 2 + + def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): + norm_type: Callable = Norm[Norm.GROUP, spatial_dims] + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + + # in case downsample uses batch norm, change to group norm + if isinstance(downsample, nn.Sequential): + downsample = nn.Sequential( + conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False), + norm_type(num_groups=32, num_channels=planes * self.expansion), + ) + + super().__init__(in_planes, planes, spatial_dims, stride, downsample) + + # change norm from batch to group norm + self.bn1 = norm_type(num_groups=32, num_channels=planes) + self.bn2 = norm_type(num_groups=32, num_channels=planes) + self.bn3 = norm_type(num_groups=32, num_channels=planes * self.expansion) + + # adapt second convolution to work with groups + self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False) + + # adapt activation function + self.relu = nn.PReLU() + + +class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck): + """ + ResNetDilatedBottleneck as used in 'Deep Attentive Features for Prostate Segmentation in 3D + Transrectal Ultrasound' . + Same as Daf3dResNetBottleneck but dilation of 2 is used in second convolution. + Args: + in_planes: number of input channels. + planes: number of output channels (taking expansion into account). + spatial_dims: number of spatial dimensions of the input image. + stride: stride to use for second conv layer. + downsample: which downsample layer to use. + """ + + def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): + super().__init__(in_planes, planes, spatial_dims, stride, downsample) + + # add dilation in second convolution + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + self.conv2 = conv_type( + planes, planes, kernel_size=3, stride=stride, padding=2, dilation=2, groups=32, bias=False + ) + + +class Daf3dResNet(ResNet): + """ + ResNet as used in 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' + . + Uses two Daf3dResNetBottleneck blocks followed by two Daf3dResNetDilatedBottleneck blocks. + + Args: + layers: how many layers to use. + block_inplanes: determine the size of planes at each step. Also tunable with widen_factor. + spatial_dims: number of spatial dimensions of the input image. + n_input_channels: number of input channels for first convolutional layer. + conv1_t_size: size of first convolution layer, determines kernel and padding. + conv1_t_stride: stride of first convolution layer. + no_max_pool: bool argument to determine if to use maxpool layer. + shortcut_type: which downsample block to use. Options are 'A', 'B', default to 'B'. + - 'A': using `self._downsample_basic_block`. + - 'B': kernel_size 1 conv + norm. + widen_factor: widen output for each layer. + num_classes: number of output (classifications). + feed_forward: whether to add the FC layer for the output, default to `True`. + bias_downsample: whether to use bias term in the downsampling block when `shortcut_type` is 'B', default to `True`. 
+ + """ + + def __init__( + self, + layers: list[int], + block_inplanes: list[int], + spatial_dims: int = 3, + n_input_channels: int = 3, + conv1_t_size: tuple[int] | int = 7, + conv1_t_stride: tuple[int] | int = 1, + no_max_pool: bool = False, + shortcut_type: str = "B", + widen_factor: float = 1.0, + num_classes: int = 400, + feed_forward: bool = True, + bias_downsample: bool = True, # for backwards compatibility (also see PR #5477) + ): + super().__init__( + ResNetBottleneck, + layers, + block_inplanes, + spatial_dims, + n_input_channels, + conv1_t_size, + conv1_t_stride, + no_max_pool, + shortcut_type, + widen_factor, + num_classes, + feed_forward, + bias_downsample, + ) + + self.in_planes = 64 + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.GROUP, spatial_dims] + + # adapt first convolution to work with new in_planes + self.conv1 = conv_type( + n_input_channels, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False + ) + self.bn1 = norm_type(32, 64) + self.relu = nn.PReLU() + + # adapt layers to our needs + self.layer1 = self._make_layer(Daf3dResNetBottleneck, block_inplanes[0], layers[0], spatial_dims, shortcut_type) + self.layer2 = self._make_layer( + Daf3dResNetBottleneck, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=(1, 2, 2) + ) + self.layer3 = self._make_layer( + Daf3dResNetDilatedBottleneck, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=1 + ) + self.layer4 = self._make_layer( + Daf3dResNetDilatedBottleneck, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=1 + ) + + class Daf3dBackbone(nn.Module): """ Backbone for 3D Feature Pyramid Network in DAF3D module based on 'Deep Attentive Features for Prostate Segmentation in @@ -100,6 +337,120 @@ def forward(self, x): return layer4 +class Daf3dFPN(FeaturePyramidNetwork): + """ + Feature Pyramid Network as used in 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' + . + Omits 3x3x3 convolution of layer_blocks and interpolates resulting feature maps to be the same size as + feature map with highest resolution. + + Args: + spatial_dims: 2D or 3D images + in_channels_list: number of channels for each feature map that is passed to the module + out_channels: number of channels of the FPN representation + extra_blocks: if provided, extra operations will be performed. 
+ It is expected to take the fpn features, the original + features and the names of the original features as input, and returns + a new list of feature maps and their corresponding names + """ + + def __init__( + self, + spatial_dims: int, + in_channels_list: list[int], + out_channels: int, + extra_blocks: ExtraFPNBlock | None = None, + ): + super().__init__(spatial_dims, in_channels_list, out_channels, extra_blocks) + + self.inner_blocks = nn.ModuleList() + for in_channels in in_channels_list: + if in_channels == 0: + raise ValueError("in_channels=0 is currently not supported") + inner_block_module = Convolution( + spatial_dims, + in_channels, + out_channels, + kernel_size=1, + adn_ordering="NA", + act="PRELU", + norm=("group", {"num_groups": 32, "num_channels": 128}), + ) + self.inner_blocks.append(inner_block_module) + + def forward(self, x: dict[str, Tensor]) -> dict[str, Tensor]: + # unpack OrderedDict into two lists for easier handling + names = list(x.keys()) + x_values: list[Tensor] = list(x.values()) + + last_inner = self.get_result_from_inner_blocks(x_values[-1], -1) + results = [] + results.append(last_inner) + + for idx in range(len(x_values) - 2, -1, -1): + inner_lateral = self.get_result_from_inner_blocks(x_values[idx], idx) + feat_shape = inner_lateral.shape[2:] + inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="trilinear") + last_inner = inner_lateral + inner_top_down + results.insert(0, last_inner) + + if self.extra_blocks is not None: + results, names = self.extra_blocks(results, x_values, names) + + # bring all layers to same size + results = [results[0]] + [F.interpolate(l, size=x["feat1"].size()[2:], mode="trilinear") for l in results[1:]] + # make it back an OrderedDict + out = OrderedDict(list(zip(names, results))) + + return out + + +class Daf3dBackboneWithFPN(BackboneWithFPN): + """ + Same as BackboneWithFPN but uses custom Daf3DFPN as feature pyramid network + + Args: + backbone: backbone network + return_layers: a dict containing the names + of the modules for which the activations will be returned as + the key of the dict, and the value of the dict is the name + of the returned activation (which the user can specify). + in_channels_list: number of channels for each feature map + that is returned, in the order they are present in the OrderedDict + out_channels: number of channels in the FPN. + spatial_dims: 2D or 3D images + extra_blocks: if provided, extra operations will + be performed. It is expected to take the fpn features, the original + features and the names of the original features as input, and returns + a new list of feature maps and their corresponding names + """ + + def __init__( + self, + backbone: nn.Module, + return_layers: dict[str, str], + in_channels_list: list[int], + out_channels: int, + spatial_dims: int | None = None, + extra_blocks: ExtraFPNBlock | None = None, + ) -> None: + super().__init__(backbone, return_layers, in_channels_list, out_channels, spatial_dims, extra_blocks) + + if spatial_dims is None: + if hasattr(backbone, "spatial_dims") and isinstance(backbone.spatial_dims, int): + spatial_dims = backbone.spatial_dims + elif isinstance(backbone.conv1, nn.Conv2d): + spatial_dims = 2 + elif isinstance(backbone.conv1, nn.Conv3d): + spatial_dims = 3 + else: + raise ValueError( + "Could not determine value of `spatial_dims` from backbone, please provide explicit value." 
+ ) + + self.fpn = Daf3dFPN(spatial_dims, in_channels_list, out_channels, extra_blocks) + + class DAF3D(nn.Module): """ DAF3D network based on 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' diff --git a/monai/networks/nets/resnet.py b/monai/networks/nets/resnet.py index 33ae4465ae..02869d415f 100644 --- a/monai/networks/nets/resnet.py +++ b/monai/networks/nets/resnet.py @@ -34,9 +34,6 @@ "resnet101", "resnet152", "resnet200", - "Daf3dResNetBottleneck", - "Daf3dResNetDilatedBottleneck", - "Daf3dResNet", ] @@ -157,71 +154,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return out -class Daf3dResNetBottleneck(ResNetBottleneck): - """ - ResNetBottleneck block as used in 'Deep Attentive Features for Prostate Segmentation in 3D - Transrectal Ultrasound' . - Instead of Batch Norm Group Norm is used, instead of ReLU PReLU activation is used. - Initial expansion is 2 instead of 4 and second convolution uses groups. - - Args: - in_planes: number of input channels. - planes: number of output channels (taking expansion into account). - spatial_dims: number of spatial dimensions of the input image. - stride: stride to use for second conv layer. - downsample: which downsample layer to use. - """ - - expansion = 2 - - def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): - norm_type: Callable = Norm[Norm.GROUP, spatial_dims] - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - - # in case downsample uses batch norm, change to group norm - if isinstance(downsample, nn.Sequential): - downsample = nn.Sequential( - conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False), - norm_type(num_groups=32, num_channels=planes * self.expansion), - ) - - super().__init__(in_planes, planes, spatial_dims, stride, downsample) - - # change norm from batch to group norm - self.bn1 = norm_type(num_groups=32, num_channels=planes) - self.bn2 = norm_type(num_groups=32, num_channels=planes) - self.bn3 = norm_type(num_groups=32, num_channels=planes * self.expansion) - - # adapt second convolution to work with groups - self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False) - - # adapt activation function - self.relu = nn.PReLU() - - -class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck): - """ - ResNetDilatedBottleneck as used in 'Deep Attentive Features for Prostate Segmentation in 3D - Transrectal Ultrasound' . - Same as Daf3dResNetBottleneck but dilation of 2 is used in second convolution. - Args: - in_planes: number of input channels. - planes: number of output channels (taking expansion into account). - spatial_dims: number of spatial dimensions of the input image. - stride: stride to use for second conv layer. - downsample: which downsample layer to use. 
- """ - - def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None): - super().__init__(in_planes, planes, spatial_dims, stride, downsample) - - # add dilation in second convolution - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - self.conv2 = conv_type( - planes, planes, kernel_size=3, stride=stride, padding=2, dilation=2, groups=32, bias=False - ) - - class ResNet(nn.Module): """ ResNet based on: `Deep Residual Learning for Image Recognition `_ @@ -495,83 +427,3 @@ def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> progress (bool): If True, displays a progress bar of the download to stderr """ return _resnet("resnet200", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs) - - -class Daf3dResNet(ResNet): - """ - ResNet as used in 'Deep Attentive Features for Prostate Segmentation in 3D Transrectal Ultrasound' - . - Uses two Daf3dResNetBottleneck blocks followed by two Daf3dResNetDilatedBottleneck blocks. - - Args: - layers: how many layers to use. - block_inplanes: determine the size of planes at each step. Also tunable with widen_factor. - spatial_dims: number of spatial dimensions of the input image. - n_input_channels: number of input channels for first convolutional layer. - conv1_t_size: size of first convolution layer, determines kernel and padding. - conv1_t_stride: stride of first convolution layer. - no_max_pool: bool argument to determine if to use maxpool layer. - shortcut_type: which downsample block to use. Options are 'A', 'B', default to 'B'. - - 'A': using `self._downsample_basic_block`. - - 'B': kernel_size 1 conv + norm. - widen_factor: widen output for each layer. - num_classes: number of output (classifications). - feed_forward: whether to add the FC layer for the output, default to `True`. - bias_downsample: whether to use bias term in the downsampling block when `shortcut_type` is 'B', default to `True`. 
- - """ - - def __init__( - self, - layers: list[int], - block_inplanes: list[int], - spatial_dims: int = 3, - n_input_channels: int = 3, - conv1_t_size: tuple[int] | int = 7, - conv1_t_stride: tuple[int] | int = 1, - no_max_pool: bool = False, - shortcut_type: str = "B", - widen_factor: float = 1.0, - num_classes: int = 400, - feed_forward: bool = True, - bias_downsample: bool = True, # for backwards compatibility (also see PR #5477) - ): - super().__init__( - ResNetBottleneck, - layers, - block_inplanes, - spatial_dims, - n_input_channels, - conv1_t_size, - conv1_t_stride, - no_max_pool, - shortcut_type, - widen_factor, - num_classes, - feed_forward, - bias_downsample, - ) - - self.in_planes = 64 - - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - norm_type: Callable = Norm[Norm.GROUP, spatial_dims] - - # adapt first convolution to work with new in_planes - self.conv1 = conv_type( - n_input_channels, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False - ) - self.bn1 = norm_type(32, 64) - self.relu = nn.PReLU() - - # adapt layers to our needs - self.layer1 = self._make_layer(Daf3dResNetBottleneck, block_inplanes[0], layers[0], spatial_dims, shortcut_type) - self.layer2 = self._make_layer( - Daf3dResNetBottleneck, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=(1, 2, 2) - ) - self.layer3 = self._make_layer( - Daf3dResNetDilatedBottleneck, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=1 - ) - self.layer4 = self._make_layer( - Daf3dResNetDilatedBottleneck, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=1 - ) From aab1dcf6da70d8ddeedcf49ac6a47882729f6f14 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:18:28 +0200 Subject: [PATCH 32/68] set the optional import to the other imports at the beginning Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 1cead5a4db..7e04e2d4c2 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -23,6 +23,8 @@ from monai.networks.layers.simplelayers import SkipConnectionWithIdx from monai.networks.layers.utils import get_dropout_layer, get_pool_layer from monai.utils import export, optional_import +# Lazy import to avoid dependency +se1, flag = optional_import("squeeze_and_excitation") __all__ = ["Quicknat"] @@ -166,8 +168,6 @@ def get_selayer(self, n_filters, se_block_type="None"): return se.ChannelSELayer(2, n_filters) # not implemented in squeeze_and_excitation in monai use other squeeze_and_excitation import: elif se_block_type == "SSE" or se_block_type == "CSSE": - # Lazy import to avoid dependency - se1, flag = optional_import("squeeze_and_excitation") # Throw error if squeeze_and_excitation is not installed if not flag: raise ImportError("Please install squeeze_and_excitation locally to use SpatialSELayer") From c0a144fc618b57ff687cfe9d4550b0fe989f83f6 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Tue, 20 Jun 2023 15:26:09 +0200 Subject: [PATCH 33/68] fixed codeformat --- monai/networks/blocks/convolutions.py | 2 +- monai/networks/blocks/denseblock.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/monai/networks/blocks/convolutions.py b/monai/networks/blocks/convolutions.py index 6975021b47..61562cf958 100644 --- a/monai/networks/blocks/convolutions.py 
+++ b/monai/networks/blocks/convolutions.py @@ -346,7 +346,7 @@ def forward(self, input: torch.Tensor, weights=None, indices=None): weights, _ = torch.max(weights, dim=0) weights = weights.view(1, channel, 1, 1) # use weights to adapt how the classes are weighted. - if len(dims)==2: + if len(dims) == 2: out_conv = F.conv2d(input, weights) else: raise ValueError("Quicknat is a 2D architecture, please check your dimension.") diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index 44dac97c46..e9713486b2 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -214,7 +214,8 @@ def forward(self, input, _): result = torch.cat((result1, result, input), dim=1) i = i + 1 - # if SELayer or Dropout layer defined put output through layer before returning, else it just goes through nn.Identity and the output does not change + # if SELayer or Dropout layer defined put output through layer before returning, + # else it just goes through nn.Identity and the output does not change result = self.se_layer(result) result = self.dropout_layer(result) From 9adb4d3cd34cd70ed8e8f3160aaf7e8ba5249746 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:33:30 +0200 Subject: [PATCH 34/68] deleted @export("monai.networks.nets") from quicknat.py Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 7e04e2d4c2..aeceb2c008 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -28,8 +28,6 @@ __all__ = ["Quicknat"] - -@export("monai.networks.nets") class Quicknat(nn.Module): """ Model for "Quick segmentation of NeuroAnaTomy (QuickNAT) based on a deep fully convolutional neural network. 
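A note on the optional-import pattern used in quicknat.py above: `optional_import` does not raise at import time; it returns the module (or a stub) together with a boolean flag, so `squeeze_and_excitation` stays a soft dependency even after the call moves to module level. A minimal sketch of the pattern, assuming only `monai.utils.optional_import`; the `SpatialSELayer` attribute is taken from the error message in the diff above and is illustrative, not verified against the package:

    from monai.utils import optional_import

    # returns (module_or_stub, found_flag); nothing is raised at this point
    se1, flag = optional_import("squeeze_and_excitation")

    def make_spatial_se(n_filters: int):
        # fail only when the optional dependency is actually needed
        if not flag:
            raise ImportError("Please install squeeze_and_excitation locally to use SpatialSELayer")
        return se1.SpatialSELayer(n_filters)  # assumed attribute, mirroring the get_selayer branch above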
From 5f88e73d6edead08749ffdbfbee380f81c015c47 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 13:33:56 +0000 Subject: [PATCH 35/68] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/networks/nets/quicknat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index aeceb2c008..d726a11639 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -22,7 +22,7 @@ from monai.networks.layers.factories import Act, Norm from monai.networks.layers.simplelayers import SkipConnectionWithIdx from monai.networks.layers.utils import get_dropout_layer, get_pool_layer -from monai.utils import export, optional_import +from monai.utils import optional_import # Lazy import to avoid dependency se1, flag = optional_import("squeeze_and_excitation") From c2ee13f11d82d4188fb458f1886e799f806c1fed Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Tue, 20 Jun 2023 15:40:35 +0200 Subject: [PATCH 36/68] fixed codeformat --- monai/networks/nets/quicknat.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index aeceb2c008..848a072bdf 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -22,12 +22,14 @@ from monai.networks.layers.factories import Act, Norm from monai.networks.layers.simplelayers import SkipConnectionWithIdx from monai.networks.layers.utils import get_dropout_layer, get_pool_layer -from monai.utils import export, optional_import +from monai.utils import optional_import + # Lazy import to avoid dependency se1, flag = optional_import("squeeze_and_excitation") __all__ = ["Quicknat"] + class Quicknat(nn.Module): """ Model for "Quick segmentation of NeuroAnaTomy (QuickNAT) based on a deep fully convolutional neural network. From f1a4740eebd5b811ab6534868940c1dd82858a00 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Wed, 21 Jun 2023 09:59:39 +0200 Subject: [PATCH 37/68] excluding ClassifierBlock in convolutions.py Adding ClassifierBlock into QuickNAT file Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/blocks/convolutions.py | 37 --------------------------- 1 file changed, 37 deletions(-) diff --git a/monai/networks/blocks/convolutions.py b/monai/networks/blocks/convolutions.py index 61562cf958..d234529345 100644 --- a/monai/networks/blocks/convolutions.py +++ b/monai/networks/blocks/convolutions.py @@ -317,40 +317,3 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: res: torch.Tensor = self.residual(x) # create the additive residual from x cx: torch.Tensor = self.conv(x) # apply x to sequence of operations return cx + res # add the residual to the output - - -class ClassifierBlock(Convolution): - """ - Returns a classifier block without an activation function at the top. - It consists of a 1 * 1 convolutional layer which maps the input to a num_class channel feature map. - The output is a probability map for each of the classes. - - Args: - spatial_dims: number of spatial dimensions. - in_channels: number of input channels. - out_channels: number of classes to map to. - strides: convolution stride. Defaults to 1. - kernel_size: convolution kernel size. Defaults to 3. 
- adn_ordering: a string representing the ordering of activation, normalization, and dropout.
-            Defaults to "NDA".
-        act: activation type and arguments. Defaults to PReLU.
-
-    """
-
-    def __init__(self, spatial_dims, in_channels, out_channels, strides, kernel_size, act=None, adn_ordering="A"):
-        super().__init__(spatial_dims, in_channels, out_channels, strides, kernel_size, adn_ordering, act)
-
-    def forward(self, input: torch.Tensor, weights=None, indices=None):
-        _, channel, *dims = input.size()
-        if weights is not None:
-            weights, _ = torch.max(weights, dim=0)
-            weights = weights.view(1, channel, 1, 1)
-            # use weights to adapt how the classes are weighted.
-            if len(dims) == 2:
-                out_conv = F.conv2d(input, weights)
-            else:
-                raise ValueError("Quicknat is a 2D architecture, please check your dimension.")
-        else:
-            out_conv = super().forward(input)
-        # no indices to return
-        return out_conv, None

From 34d4031c68e91b3483fa893af32cd9aa7330c7b2 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 21 Jun 2023 08:00:07 +0000
Subject: [PATCH 38/68] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 monai/networks/blocks/convolutions.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/monai/networks/blocks/convolutions.py b/monai/networks/blocks/convolutions.py
index d234529345..8b18614364 100644
--- a/monai/networks/blocks/convolutions.py
+++ b/monai/networks/blocks/convolutions.py
@@ -16,7 +16,6 @@
 import numpy as np
 import torch
 import torch.nn as nn
-import torch.nn.functional as F

 from monai.networks.blocks import ADN
 from monai.networks.layers.convutils import same_padding, stride_minus_kernel_padding

From 5c54a70cb319d00e4d4b26ab00d5fd5d0b7adc22 Mon Sep 17 00:00:00 2001
From: ge96lip <73938628+ge96lip@users.noreply.github.com>
Date: Wed, 21 Jun 2023 10:01:48 +0200
Subject: [PATCH 39/68] Included ClassifierBlock into quicknat.py

Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com>
---
 monai/networks/nets/quicknat.py | 39 ++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py
index 14d78b1d3b..5df9e3fa5c 100644
--- a/monai/networks/nets/quicknat.py
+++ b/monai/networks/nets/quicknat.py
@@ -18,7 +18,6 @@

 from monai.networks.blocks import Bottleneck, ConvConcatDenseBlock, Decoder, Encoder
 from monai.networks.blocks import squeeze_and_excitation as se
-from monai.networks.blocks.convolutions import ClassifierBlock
 from monai.networks.layers.factories import Act, Norm
 from monai.networks.layers.simplelayers import SkipConnectionWithIdx
 from monai.networks.layers.utils import get_dropout_layer, get_pool_layer
@@ -219,3 +218,41 @@ def forward(self, input, indices):
     for module in self:
         input, indices = module(input, indices)
     return input, indices
+
+
+class ClassifierBlock(Convolution):
+    """
+    Returns a classifier block without an activation function at the top.
+    It consists of a 1 * 1 convolutional layer which maps the input to a num_class channel feature map.
+    The output is a probability map for each of the classes.
+
+    Args:
+        spatial_dims: number of spatial dimensions.
+        in_channels: number of input channels.
+        out_channels: number of classes to map to.
+        strides: convolution stride. Defaults to 1.
+        kernel_size: convolution kernel size. Defaults to 3.
+        adn_ordering: a string representing the ordering of activation, normalization, and dropout.
+            Defaults to "NDA".
+        act: activation type and arguments. Defaults to PReLU.
+
+    """
+
+    def __init__(self, spatial_dims, in_channels, out_channels, strides, kernel_size, act=None, adn_ordering="A"):
+        super().__init__(spatial_dims, in_channels, out_channels, strides, kernel_size, adn_ordering, act)
+
+    def forward(self, input: torch.Tensor, weights=None, indices=None):
+        _, channel, *dims = input.size()
+        if weights is not None:
+            weights, _ = torch.max(weights, dim=0)
+            weights = weights.view(1, channel, 1, 1)
+            # use weights to adapt how the classes are weighted.
+            if len(dims) == 2:
+                out_conv = F.conv2d(input, weights)
+            else:
+                raise ValueError("Quicknat is a 2D architecture, please check your dimension.")
+        else:
+            out_conv = super().forward(input)
+        # no indices to return
+        return out_conv, None
+

From 55f2f5c8ff29436e3c137774a1bb88eb0f321451 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 21 Jun 2023 08:02:14 +0000
Subject: [PATCH 40/68] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 monai/networks/nets/quicknat.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py
index 5df9e3fa5c..b0f71bbdb9 100644
--- a/monai/networks/nets/quicknat.py
+++ b/monai/networks/nets/quicknat.py
@@ -255,4 +255,3 @@ def forward(self, input: torch.Tensor, weights=None, indices=None):
         out_conv = super().forward(input)
         # no indices to return
         return out_conv, None
-

From 994a71c4e511c9537061508e04c6ef0b669ce587 Mon Sep 17 00:00:00 2001
From: ge96lip <73938628+ge96lip@users.noreply.github.com>
Date: Wed, 21 Jun 2023 10:04:43 +0200
Subject: [PATCH 41/68] Included SkipConnectionWithIdx into Quicknat using Eric's suggestion for a cleaner implementation

Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com>
---
 monai/networks/nets/quicknat.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py
index b0f71bbdb9..d64205992c 100644
--- a/monai/networks/nets/quicknat.py
+++ b/monai/networks/nets/quicknat.py
@@ -200,8 +200,19 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:
         return input

-# Should go into a layers file but not clear which exact one.
+# QuickNAT specific Blocks
+class SkipConnectionWithIdx(SkipConnection):
+    """
+    Combine the forward pass input with the result from the given submodule::
+        --+--submodule--o--
+        |_____________|
+    The available modes are ``"cat"``, ``"add"``, ``"mul"``.
+    Defaults to "cat" and dimension 1.
+    Inherits from SkipConnection but provides the indices with each forward pass.
+ """ + def forward(self, input, indices): + return super().forward(input), indices class SequentialWithIdx(nn.Sequential): """ @@ -255,3 +266,4 @@ def forward(self, input: torch.Tensor, weights=None, indices=None): out_conv = super().forward(input) # no indices to return return out_conv, None + From 69aa7b0a8fd09ca78348cacdc17b9ee8d3e983bf Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Wed, 21 Jun 2023 10:05:36 +0200 Subject: [PATCH 42/68] took out all quicknat specific blocks Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/blocks/denseblock.py | 172 ---------------------------- 1 file changed, 172 deletions(-) diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index e9713486b2..9f0e1a7b39 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -131,175 +131,3 @@ def _get_layer(self, in_channels, out_channels, dilation): ) -class ConvConcatDenseBlock(ConvDenseBlock): - """ - This dense block is defined as a sequence of 'Convolution' blocks. It overwrite the '_get_layer' methodto change the ordering of - Every convolutional layer is preceded by a batch-normalization layer and a Rectifier Linear Unit (ReLU) layer. - The first two convolutional layers are followed by a concatenation layer that concatenates - the input feature map with outputs of the current and previous convolutional blocks. - Kernel size of two convolutional layers kept small to limit number of paramters. - Appropriate padding is provided so that the size of feature maps before and after convolution remains constant. - The output channels for each convolution layer is set to 64, which acts as a bottle- neck for feature map selectivity. - The input channel size is variable, depending on the number of dense connections. - The third convolutional layer is also preceded by a batch normalization and ReLU, - but has a 1 * 1 kernel size to compress the feature map size to 64. - Args: - in_channles: variable depending on depth of the network - seLayer: Squeeze and Excite block to be included, defaults to None, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, - dropout_layer: Dropout block to be included, defaults to None. - :return: forward passed tensor - """ - - def __init__( - self, - in_channels: int, - se_layer: nn.Module | None = nn.Identity, - dropout_layer: type[nn.Dropout2d] | None = nn.Identity, - kernel_size: Sequence[int] | int = 5, - num_filters: int = 64, - ): - self.count = 0 - super().__init__( - in_channels=in_channels, - spatial_dims=2, - # number of channels stay constant throughout the convolution layers - channels=[num_filters, num_filters, num_filters], - norm=("instance", {"num_features": in_channels}), - kernel_size=kernel_size, - ) - self.se_layer = se_layer - self.dropout_layer = dropout_layer - - def _get_layer(self, in_channels, out_channels, dilation): - """ - After ever convolutional layer the output is concatenated with the input and the layer before. - The concatenated output is used as input to the next convolutional layer. - - Args: - in_channels: number of input channels. - out_channels: number of output channels. - strides: convolution stride. - is_top: True if this is the top block. 
- """ - kernelsize = self.kernel_size if self.count < 2 else (1, 1) - # padding = None if self.count < 2 else (0, 0) - self.count += 1 - conv = Convolution( - spatial_dims=self.spatial_dims, - in_channels=in_channels, - out_channels=out_channels, - strides=1, - kernel_size=kernelsize, - act=self.act, - norm=("instance", {"num_features": in_channels}), - ) - return nn.Sequential(conv.get_submodule("adn"), conv.get_submodule("conv")) - - def forward(self, input, _): - i = 0 - result = input - for l in self.children(): - # ignoring the max (un-)pool and droupout already added in the initial initialization step - if isinstance(l, (nn.MaxPool2d, nn.MaxUnpool2d, nn.Dropout2d)): - continue - # first convolutional forward - result = l(result) - if i == 0: - result1 = result - # concatenation with the input feature map - result = torch.cat((input, result), dim=1) - - if i == 1: - # concatenation with input feature map and feature map from first convolution - result = torch.cat((result1, result, input), dim=1) - i = i + 1 - - # if SELayer or Dropout layer defined put output through layer before returning, - # else it just goes through nn.Identity and the output does not change - result = self.se_layer(result) - result = self.dropout_layer(result) - - return result, None - - -class Encoder(ConvConcatDenseBlock): - """ - Returns a convolution dense block for the encoding (down) part of a layer of the network. - This Encoder block downpools the data with max_pool. - Its output is used as input to the next layer down. - New feature: it returns the indices of the max_pool to the decoder (up) path - at the same layer to upsample the input. - - Args: - in_channels: number of input channels. - max_pool: predefined max_pool layer to downsample the data. - se_layer: Squeeze and Excite block to be included, defaults to None. - dropout: Dropout block to be included, defaults to None. - kernel_size : kernel size of the convolutional layers. Defaults to 5*5 - num_filters : number of input channels to each convolution block. Defaults to 64 - """ - - def __init__(self, in_channels: int, max_pool, se_layer, dropout, kernel_size, num_filters): - super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) - self.max_pool = max_pool - - def forward(self, input, indices=None): - input, indices = self.max_pool(input) - - out_block, _ = super().forward(input, None) - # safe the indices for unpool on decoder side - return out_block, indices - - -class Decoder(ConvConcatDenseBlock): - """ - Returns a convolution dense block for the decoding (up) part of a layer of the network. - This will upsample data with an unpool block before the forward. - It uses the indices from corresponding encoder on it's level. - Its output is used as input to the next layer up. - - Args: - in_channels: number of input channels. - un_pool: predefined unpool block. - se_layer: predefined SELayer. Defaults to None. - dropout: predefined dropout block. Defaults to None. - kernel_size: Kernel size of convolution layers. Defaults to 5*5. - num_filters: number of input channels to each convolution layer. Defaults to 64. 
- """ - - def __init__(self, in_channels: int, un_pool, se_layer, dropout, kernel_size, num_filters): - super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) - self.un_pool = un_pool - - def forward(self, input, indices): - out_block, _ = super().forward(input, None) - out_block = self.un_pool(out_block, indices) - return out_block, None - - -class Bottleneck(ConvConcatDenseBlock): - """ - Returns the bottom or bottleneck layer at the bottom of a network linking encoder to decoder halves. - It consists of a 5 * 5 convolutional layer and a batch normalization layer to separate - the encoder and decoder part of the network, restricting information flow between the encoder and decoder. - - Args: - in_channels: number of input channels. - se_layer: predefined SELayer. Defaults to None. - dropout: predefined dropout block. Defaults to None. - un_pool: predefined unpool block. - max_pool: predefined maxpool block. - kernel_size: Kernel size of convolution layers. Defaults to 5*5. - num_filters: number of input channels to each convolution layer. Defaults to 64. - """ - - def __init__(self, in_channels: int, se_layer, dropout, max_pool, un_pool, kernel_size, num_filters): - super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) - self.max_pool = max_pool - self.un_pool = un_pool - - def forward(self, input, indices): - out_block, indices = self.max_pool(input) - out_block, _ = super().forward(out_block, None) - out_block = self.un_pool(out_block, indices) - return out_block, None From 3c51089ba32cd3c859ec346c8587f5781f258e9f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 21 Jun 2023 08:05:57 +0000 Subject: [PATCH 43/68] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/networks/nets/quicknat.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index d64205992c..f70aa9684b 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -200,7 +200,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: return input -# QuickNAT specific Blocks +# QuickNAT specific Blocks class SkipConnectionWithIdx(SkipConnection): """ @@ -266,4 +266,3 @@ def forward(self, input: torch.Tensor, weights=None, indices=None): out_conv = super().forward(input) # no indices to return return out_conv, None - From 06415476d796043e69bffe9722fa3650066c1915 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Wed, 21 Jun 2023 10:17:23 +0200 Subject: [PATCH 44/68] Quicknat specific blocks included Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 178 +++++++++++++++++++++++++++++++- 1 file changed, 176 insertions(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index f70aa9684b..7e2cb348a6 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -16,10 +16,10 @@ import torch import torch.nn as nn -from monai.networks.blocks import Bottleneck, ConvConcatDenseBlock, Decoder, Encoder +from monai.networks.blocks import ConvDenseBlock from monai.networks.blocks import squeeze_and_excitation as se from monai.networks.layers.factories import Act, Norm -from monai.networks.layers.simplelayers import SkipConnectionWithIdx +from monai.networks.layers.simplelayers import SkipConnection from 
monai.networks.layers.utils import get_dropout_layer, get_pool_layer
 from monai.utils import optional_import

 # Lazy import to avoid dependency
@@ -266,3 +266,177 @@ def forward(self, input: torch.Tensor, weights=None, indices=None):
         out_conv = super().forward(input)
         # no indices to return
         return out_conv, None
+
+# Quicknat specific blocks. All blocks inherit from MONAI blocks but have adaptations to their structure
+class ConvConcatDenseBlock(ConvDenseBlock):
+    """
+    This dense block is defined as a sequence of 'Convolution' blocks. It overwrites the '_get_layer' method to change the ordering of the layers.
+    Every convolutional layer is preceded by a batch-normalization layer and a Rectifier Linear Unit (ReLU) layer.
+    The first two convolutional layers are followed by a concatenation layer that concatenates
+    the input feature map with outputs of the current and previous convolutional blocks.
+    The kernel size of the first two convolutional layers is kept small to limit the number of parameters.
+    Appropriate padding is provided so that the size of feature maps before and after convolution remains constant.
+    The output channels for each convolution layer are set to 64, which acts as a bottleneck for feature map selectivity.
+    The input channel size is variable, depending on the number of dense connections.
+    The third convolutional layer is also preceded by a batch normalization and ReLU,
+    but has a 1 * 1 kernel size to compress the feature map size to 64.
+    Args:
+        in_channels: variable depending on depth of the network
+        se_layer: Squeeze and Excite block to be included, defaults to None, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'},
+        dropout_layer: Dropout block to be included, defaults to None.
+    :return: forward passed tensor
+    """
+
+    def __init__(
+        self,
+        in_channels: int,
+        se_layer: nn.Module | None = nn.Identity,
+        dropout_layer: type[nn.Dropout2d] | None = nn.Identity,
+        kernel_size: Sequence[int] | int = 5,
+        num_filters: int = 64,
+    ):
+        self.count = 0
+        super().__init__(
+            in_channels=in_channels,
+            spatial_dims=2,
+            # the number of channels stays constant throughout the convolution layers
+            channels=[num_filters, num_filters, num_filters],
+            norm=("instance", {"num_features": in_channels}),
+            kernel_size=kernel_size,
+        )
+        self.se_layer = se_layer
+        self.dropout_layer = dropout_layer
+
+    def _get_layer(self, in_channels, out_channels, dilation):
+        """
+        After every convolutional layer, the output is concatenated with the input and the layer before.
+        The concatenated output is used as input to the next convolutional layer.
+
+        Args:
+            in_channels: number of input channels.
+            out_channels: number of output channels.
+            strides: convolution stride.
+            is_top: True if this is the top block.
+ """ + kernelsize = self.kernel_size if self.count < 2 else (1, 1) + # padding = None if self.count < 2 else (0, 0) + self.count += 1 + conv = Convolution( + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=1, + kernel_size=kernelsize, + act=self.act, + norm=("instance", {"num_features": in_channels}), + ) + return nn.Sequential(conv.get_submodule("adn"), conv.get_submodule("conv")) + + def forward(self, input, _): + i = 0 + result = input + for l in self.children(): + # ignoring the max (un-)pool and droupout already added in the initial initialization step + if isinstance(l, (nn.MaxPool2d, nn.MaxUnpool2d, nn.Dropout2d)): + continue + # first convolutional forward + result = l(result) + if i == 0: + result1 = result + # concatenation with the input feature map + result = torch.cat((input, result), dim=1) + + if i == 1: + # concatenation with input feature map and feature map from first convolution + result = torch.cat((result1, result, input), dim=1) + i = i + 1 + + # if SELayer or Dropout layer defined put output through layer before returning, + # else it just goes through nn.Identity and the output does not change + result = self.se_layer(result) + result = self.dropout_layer(result) + + return result, None + + + class Encoder(ConvConcatDenseBlock): + """ + Returns a convolution dense block for the encoding (down) part of a layer of the network. + This Encoder block downpools the data with max_pool. + Its output is used as input to the next layer down. + New feature: it returns the indices of the max_pool to the decoder (up) path + at the same layer to upsample the input. + + Args: + in_channels: number of input channels. + max_pool: predefined max_pool layer to downsample the data. + se_layer: Squeeze and Excite block to be included, defaults to None. + dropout: Dropout block to be included, defaults to None. + kernel_size : kernel size of the convolutional layers. Defaults to 5*5 + num_filters : number of input channels to each convolution block. Defaults to 64 + """ + + def __init__(self, in_channels: int, max_pool, se_layer, dropout, kernel_size, num_filters): + super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) + self.max_pool = max_pool + + def forward(self, input, indices=None): + input, indices = self.max_pool(input) + + out_block, _ = super().forward(input, None) + # safe the indices for unpool on decoder side + return out_block, indices + + + class Decoder(ConvConcatDenseBlock): + """ + Returns a convolution dense block for the decoding (up) part of a layer of the network. + This will upsample data with an unpool block before the forward. + It uses the indices from corresponding encoder on it's level. + Its output is used as input to the next layer up. + + Args: + in_channels: number of input channels. + un_pool: predefined unpool block. + se_layer: predefined SELayer. Defaults to None. + dropout: predefined dropout block. Defaults to None. + kernel_size: Kernel size of convolution layers. Defaults to 5*5. + num_filters: number of input channels to each convolution layer. Defaults to 64. 
+ """ + + def __init__(self, in_channels: int, un_pool, se_layer, dropout, kernel_size, num_filters): + super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) + self.un_pool = un_pool + + def forward(self, input, indices): + out_block, _ = super().forward(input, None) + out_block = self.un_pool(out_block, indices) + return out_block, None + + + class Bottleneck(ConvConcatDenseBlock): + """ + Returns the bottom or bottleneck layer at the bottom of a network linking encoder to decoder halves. + It consists of a 5 * 5 convolutional layer and a batch normalization layer to separate + the encoder and decoder part of the network, restricting information flow between the encoder and decoder. + + Args: + in_channels: number of input channels. + se_layer: predefined SELayer. Defaults to None. + dropout: predefined dropout block. Defaults to None. + un_pool: predefined unpool block. + max_pool: predefined maxpool block. + kernel_size: Kernel size of convolution layers. Defaults to 5*5. + num_filters: number of input channels to each convolution layer. Defaults to 64. + """ + + def __init__(self, in_channels: int, se_layer, dropout, max_pool, un_pool, kernel_size, num_filters): + super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) + self.max_pool = max_pool + self.un_pool = un_pool + + def forward(self, input, indices): + out_block, indices = self.max_pool(input) + out_block, _ = super().forward(out_block, None) + out_block = self.un_pool(out_block, indices) + return out_block, None From 73cc2db903eaa684fb2837e690fdd90b6b491206 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 21 Jun 2023 08:18:02 +0000 Subject: [PATCH 45/68] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/networks/blocks/denseblock.py | 2 -- monai/networks/nets/quicknat.py | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index 9f0e1a7b39..908c21dc9e 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -129,5 +129,3 @@ def _get_layer(self, in_channels, out_channels, dilation): dilation=dilation, bias=self.bias, ) - - diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 7e2cb348a6..5f3c29e7df 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -266,8 +266,8 @@ def forward(self, input: torch.Tensor, weights=None, indices=None): out_conv = super().forward(input) # no indices to return return out_conv, None - -# Quicknat specific blocks. All blocks inherit from MONAI blocks but have adaptions to their structure + +# Quicknat specific blocks. All blocks inherit from MONAI blocks but have adaptions to their structure class ConvConcatDenseBlock(ConvDenseBlock): """ This dense block is defined as a sequence of 'Convolution' blocks. 
It overwrites the '_get_layer' method to change the ordering of the layers.

From fcd3b48a6fc981fc57a088026fa80ba274306c92 Mon Sep 17 00:00:00 2001
From: ge96lip <73938628+ge96lip@users.noreply.github.com>
Date: Wed, 21 Jun 2023 10:19:05 +0200
Subject: [PATCH 46/68] small adaptations to imports

Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com>
---
 monai/networks/nets/quicknat.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py
index 5f3c29e7df..829b9414eb 100644
--- a/monai/networks/nets/quicknat.py
+++ b/monai/networks/nets/quicknat.py
@@ -16,7 +16,7 @@

 import torch
 import torch.nn as nn

-from monai.networks.blocks import ConvDenseBlock
+from monai.networks.blocks import ConvDenseBlock, Convolution
 from monai.networks.blocks import squeeze_and_excitation as se
 from monai.networks.layers.factories import Act, Norm
 from monai.networks.layers.simplelayers import SkipConnection

From fbe7db58a51c1546d938b4246f7d6bc3447fef56 Mon Sep 17 00:00:00 2001
From: ge96lip <73938628+ge96lip@users.noreply.github.com>
Date: Wed, 21 Jun 2023 10:31:48 +0200
Subject: [PATCH 47/68] deleted SkipConnectionWithIdx

Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com>
---
 monai/networks/layers/simplelayers.py | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py
index 4a0ebfecad..3cc72a2db5 100644
--- a/monai/networks/layers/simplelayers.py
+++ b/monai/networks/layers/simplelayers.py
@@ -47,7 +47,6 @@
     "Reshape",
     "SavitzkyGolayFilter",
     "SkipConnection",
-    "SkipConnectionWithIdx",
     "apply_filter",
     "median_filter",
     "separable_filtering",
@@ -138,26 +137,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         raise NotImplementedError(f"Unsupported mode {self.mode}.")

-class SkipConnectionWithIdx(SkipConnection):
-    """
-    Combine the forward pass input with the result from the given submodule::
-
-        --+--submodule--o--
-        |_____________|
-
-    The available modes are ``"cat"``, ``"add"``, ``"mul"``.
-    Defaults to "cat" and dimension 1.
-    Inherits from SkipConnection but provides the indizes with each forward pass.
-
-    """
-
-    def __init__(self, submodule, dim: int = 1, mode: Union[str, SkipMode] = "cat") -> None:
-        super().__init__(submodule, dim=dim, mode=mode)
-
-    def forward(self, input, indices):
-        return super().forward(input), indices
-
-
 class Flatten(nn.Module):
     """
     Flattens the given input in the forward pass to be [B,-1] in shape.
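The `*WithIdx` blocks being moved in these commits all share one calling convention: `forward` takes and returns an `(input, indices)` pair, so the max-pool indices recorded on the encoder path can reach the matching unpool on the decoder path. A minimal, self-contained sketch of that hand-off in plain PyTorch (shapes are illustrative):

    import torch
    import torch.nn as nn

    pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
    unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)

    x = torch.randn(1, 1, 8, 8)
    down, indices = pool(x)     # encoder side: keep the max-pool indices
    up = unpool(down, indices)  # decoder side: restore resolution with the saved indices
    assert up.shape == x.shape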
From aca8b312c6d77520f9f41aa0491d46824cbfdc75 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 21 Jun 2023 08:33:00 +0000 Subject: [PATCH 48/68] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/networks/layers/simplelayers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py index 3cc72a2db5..a1122ceaa2 100644 --- a/monai/networks/layers/simplelayers.py +++ b/monai/networks/layers/simplelayers.py @@ -13,7 +13,7 @@ import math from copy import deepcopy -from typing import Sequence, Union +from typing import Sequence import torch import torch.nn.functional as F From 321e13e88f18bef8b529fd0fe36bc1ca18f1f348 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Wed, 21 Jun 2023 10:33:47 +0200 Subject: [PATCH 49/68] deleted old reverences to quicknat specific blocks denseblock.py Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/blocks/denseblock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/blocks/denseblock.py b/monai/networks/blocks/denseblock.py index 908c21dc9e..f5d799d5e7 100644 --- a/monai/networks/blocks/denseblock.py +++ b/monai/networks/blocks/denseblock.py @@ -19,7 +19,7 @@ from monai.networks.blocks import Convolution, ResidualUnit from monai.networks.layers.factories import Act, Norm -__ALL__ = ["DenseBlock", "ConvDenseBlock", "ConvConcatDenseBlock", "Bottleneck", "Encoder", "Decoder"] +__ALL__ = ["DenseBlock", "ConvDenseBlock"] class DenseBlock(nn.Sequential): From 5cfd78f2e881fa0b23ee12c7f28ba436caff8bd9 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Wed, 21 Jun 2023 10:34:37 +0200 Subject: [PATCH 50/68] deleted SkipConnectionWithIdx from __init__.py as it now is in quicknat.py Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/layers/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/monai/networks/layers/__init__.py b/monai/networks/layers/__init__.py index 60ca213c25..d61ed57f7f 100644 --- a/monai/networks/layers/__init__.py +++ b/monai/networks/layers/__init__.py @@ -31,7 +31,6 @@ SavitzkyGolayFilter, SharpenFilter, SkipConnection, - SkipConnectionWithIdx, apply_filter, median_filter, separable_filtering, From e4d88b26ae7b23b6d623f6c3c88c870d2a48a255 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Wed, 21 Jun 2023 11:25:26 +0200 Subject: [PATCH 51/68] codeformat and block initialization according to new class arrangement --- monai/networks/blocks/__init__.py | 2 +- monai/networks/nets/quicknat.py | 489 +++++++++++++++--------------- 2 files changed, 248 insertions(+), 243 deletions(-) diff --git a/monai/networks/blocks/__init__.py b/monai/networks/blocks/__init__.py index dca14c2d1c..e67cb3376f 100644 --- a/monai/networks/blocks/__init__.py +++ b/monai/networks/blocks/__init__.py @@ -17,7 +17,7 @@ from .backbone_fpn_utils import BackboneWithFPN from .convolutions import Convolution, ResidualUnit from .crf import CRF -from .denseblock import Bottleneck, ConvConcatDenseBlock, ConvDenseBlock, Decoder, DenseBlock, Encoder +from .denseblock import ConvDenseBlock, DenseBlock from .dints_block import ActiConvNormBlock, FactorizedIncreaseBlock, FactorizedReduceBlock, P3DActiConvNormBlock from .downsample import MaxAvgPool from .dynunet_block import 
UnetBasicBlock, UnetOutBlock, UnetResBlock, UnetUpBlock, get_output_padding, get_padding diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 829b9414eb..3dfee964f3 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -15,6 +15,7 @@ import torch import torch.nn as nn +import torch.nn.functional as F from monai.networks.blocks import ConvDenseBlock, Convolution from monai.networks.blocks import squeeze_and_excitation as se @@ -22,11 +23,257 @@ from monai.networks.layers.simplelayers import SkipConnection from monai.networks.layers.utils import get_dropout_layer, get_pool_layer from monai.utils import optional_import + # Lazy import to avoid dependency se1, flag = optional_import("squeeze_and_excitation") __all__ = ["Quicknat"] +# QuickNAT specific Blocks + + +class SkipConnectionWithIdx(SkipConnection): + """ + Combine the forward pass input with the result from the given submodule:: + --+--submodule--o-- + |_____________| + The available modes are ``"cat"``, ``"add"``, ``"mul"``. + Defaults to "cat" and dimension 1. + Inherits from SkipConnection but provides the indices with each forward pass. + """ + + def forward(self, input, indices): + return super().forward(input), indices + + +class SequentialWithIdx(nn.Sequential): + """ + A sequential container. + Modules will be added to it in the order they are passed in the + constructor. + Own implementation to work with the new indices in the forward pass. + """ + + def __init__(self, *args): + super().__init__(*args) + + def forward(self, input, indices): + for module in self: + input, indices = module(input, indices) + return input, indices + + +class ClassifierBlock(Convolution): + """ + Returns a classifier block without an activation function at the top. + It consists of a 1 * 1 convolutional layer which maps the input to a num_class channel feature map. + The output is a probability map for each of the classes. + + Args: + spatial_dims: number of spatial dimensions. + in_channels: number of input channels. + out_channels: number of classes to map to. + strides: convolution stride. Defaults to 1. + kernel_size: convolution kernel size. Defaults to 3. + adn_ordering: a string representing the ordering of activation, normalization, and dropout. + Defaults to "NDA". + act: activation type and arguments. Defaults to PReLU. + + """ + + def __init__(self, spatial_dims, in_channels, out_channels, strides, kernel_size, act=None, adn_ordering="A"): + super().__init__(spatial_dims, in_channels, out_channels, strides, kernel_size, adn_ordering, act) + + def forward(self, input: torch.Tensor, weights=None, indices=None): + _, channel, *dims = input.size() + if weights is not None: + weights, _ = torch.max(weights, dim=0) + weights = weights.view(1, channel, 1, 1) + # use weights to adapt how the classes are weighted. + if len(dims) == 2: + out_conv = F.conv2d(input, weights) + else: + raise ValueError("Quicknat is a 2D architecture, please check your dimension.") + else: + out_conv = super().forward(input) + # no indices to return + return out_conv, None + + +# Quicknat specific blocks. All blocks inherit from MONAI blocks but have adaptations to their structure class ConvConcatDenseBlock(ConvDenseBlock): + """ + This dense block is defined as a sequence of 'Convolution' blocks. It overrides the '_get_layer' method to change the ordering of layers: + every convolutional layer is preceded by a batch-normalization layer and a Rectified Linear Unit (ReLU) layer.
+ The first two convolutional layers are followed by a concatenation layer that concatenates + the input feature map with outputs of the current and previous convolutional blocks. + The kernel size of the first two convolutional layers is kept small to limit the number of parameters. + Appropriate padding is provided so that the size of feature maps before and after convolution remains constant. + The number of output channels for each convolution layer is set to 64, which acts as a bottleneck for feature map selectivity. + The input channel size is variable, depending on the number of dense connections. + The third convolutional layer is also preceded by a batch normalization and ReLU, + but has a 1 * 1 kernel size to compress the feature map size to 64. + Args: + in_channels: variable, depending on the depth of the network + se_layer: Squeeze and Excite block to be included, defaults to None; valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}. + dropout_layer: Dropout block to be included, defaults to None. + :return: forward passed tensor + """ + + def __init__( + self, + in_channels: int, + se_layer: nn.Module | None = nn.Identity, + dropout_layer: type[nn.Dropout2d] | None = nn.Identity, + kernel_size: Sequence[int] | int = 5, + num_filters: int = 64, + ): + self.count = 0 + super().__init__( + in_channels=in_channels, + spatial_dims=2, + # number of channels stays constant throughout the convolution layers + channels=[num_filters, num_filters, num_filters], + norm=("instance", {"num_features": in_channels}), + kernel_size=kernel_size, + ) + self.se_layer = se_layer + self.dropout_layer = dropout_layer + + def _get_layer(self, in_channels, out_channels, dilation): + """ + After every convolutional layer, the output is concatenated with the input and the output of the layer before. + The concatenated output is used as input to the next convolutional layer. + + Args: + in_channels: number of input channels. + out_channels: number of output channels. + dilation: dilation of the convolution (the stride is fixed to 1). + Returns: an activation/normalization (ADN) block followed by the convolution. + """ + kernelsize = self.kernel_size if self.count < 2 else (1, 1) + # padding = None if self.count < 2 else (0, 0) + self.count += 1 + conv = Convolution( + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=1, + kernel_size=kernelsize, + act=self.act, + norm=("instance", {"num_features": in_channels}), + ) + return nn.Sequential(conv.get_submodule("adn"), conv.get_submodule("conv")) + + def forward(self, input, _): + i = 0 + result = input + for l in self.children(): + # ignore the max (un-)pool and dropout layers already added during initialization + if isinstance(l, (nn.MaxPool2d, nn.MaxUnpool2d, nn.Dropout2d)): + continue + # first convolutional forward + result = l(result) + if i == 0: + result1 = result + # concatenation with the input feature map + result = torch.cat((input, result), dim=1) + + if i == 1: + # concatenation with input feature map and feature map from first convolution + result = torch.cat((result1, result, input), dim=1) + i = i + 1 + + # if an SELayer or Dropout layer is defined, pass the output through it before returning; + # else it just goes through nn.Identity and the output does not change + result = self.se_layer(result) + result = self.dropout_layer(result) + + return result, None + + +class Encoder(ConvConcatDenseBlock): + """ + Returns a convolution dense block for the encoding (down) part of a layer of the network. + This Encoder block downsamples the data with max_pool. + Its output is used as input to the next layer down.
+ It additionally returns the indices of the max_pool to the decoder (up) path + at the same layer to upsample the input. + + Args: + in_channels: number of input channels. + max_pool: predefined max_pool layer to downsample the data. + se_layer: Squeeze and Excite block to be included, defaults to None. + dropout: Dropout block to be included, defaults to None. + kernel_size: kernel size of the convolutional layers. Defaults to 5*5. + num_filters: number of input channels to each convolution block. Defaults to 64. + """ + + def __init__(self, in_channels: int, max_pool, se_layer, dropout, kernel_size, num_filters): + super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) + self.max_pool = max_pool + + def forward(self, input, indices=None): + input, indices = self.max_pool(input) + + out_block, _ = super().forward(input, None) + # save the indices for unpooling on the decoder side + return out_block, indices + + +class Decoder(ConvConcatDenseBlock): + """ + Returns a convolution dense block for the decoding (up) part of a layer of the network. + This will upsample data with an unpool block before the forward. + It uses the indices from the corresponding encoder at its level. + Its output is used as input to the next layer up. + + Args: + in_channels: number of input channels. + un_pool: predefined unpool block. + se_layer: predefined SELayer. Defaults to None. + dropout: predefined dropout block. Defaults to None. + kernel_size: Kernel size of convolution layers. Defaults to 5*5. + num_filters: number of input channels to each convolution layer. Defaults to 64. + """ + + def __init__(self, in_channels: int, un_pool, se_layer, dropout, kernel_size, num_filters): + super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) + self.un_pool = un_pool + + def forward(self, input, indices): + out_block, _ = super().forward(input, None) + out_block = self.un_pool(out_block, indices) + return out_block, None + + +class Bottleneck(ConvConcatDenseBlock): + """ + Returns the bottleneck layer at the bottom of the network, linking the encoder and decoder halves. + It consists of a 5 * 5 convolutional layer and a batch normalization layer to separate + the encoder and decoder part of the network, restricting information flow between the encoder and decoder. + + Args: + in_channels: number of input channels. + se_layer: predefined SELayer. Defaults to None. + dropout: predefined dropout block. Defaults to None. + un_pool: predefined unpool block. + max_pool: predefined maxpool block. + kernel_size: Kernel size of convolution layers. Defaults to 5*5. + num_filters: number of input channels to each convolution layer. Defaults to 64.
+ """ + + def __init__(self, in_channels: int, se_layer, dropout, max_pool, un_pool, kernel_size, num_filters): + super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) + self.max_pool = max_pool + self.un_pool = un_pool + + def forward(self, input, indices): + out_block, indices = self.max_pool(input) + out_block, _ = super().forward(out_block, None) + out_block = self.un_pool(out_block, indices) + return out_block, None + class Quicknat(nn.Module): """ @@ -198,245 +445,3 @@ def is_cuda(self): def forward(self, input: torch.Tensor) -> torch.Tensor: input, _ = self.model(input, None) return input - - -# QuickNAT specific Blocks - -class SkipConnectionWithIdx(SkipConnection): - """ - Combine the forward pass input with the result from the given submodule:: - --+--submodule--o-- - |_____________| - The available modes are ``"cat"``, ``"add"``, ``"mul"``. - Defaults to "cat" and dimension 1. - Inherits from SkipConnection but provides the indizes with each forward pass. - """ - def forward(self, input, indices): - return super().forward(input), indices - -class SequentialWithIdx(nn.Sequential): - """ - A sequential container. - Modules will be added to it in the order they are passed in the - constructor. - Own implementation to work with the new indices in the forward pass. - """ - - def __init__(self, *args): - super().__init__(*args) - - def forward(self, input, indices): - for module in self: - input, indices = module(input, indices) - return input, indices - - -class ClassifierBlock(Convolution): - """ - Returns a classifier block without an activation function at the top. - It consists of a 1 * 1 convolutional layer which maps the input to a num_class channel feature map. - The output is a probability map for each of the classes. - - Args: - spatial_dims: number of spatial dimensions. - in_channels: number of input channels. - out_channels: number of classes to map to. - strides: convolution stride. Defaults to 1. - kernel_size: convolution kernel size. Defaults to 3. - adn_ordering: a string representing the ordering of activation, normalization, and dropout. - Defaults to "NDA". - act: activation type and arguments. Defaults to PReLU. - - """ - - def __init__(self, spatial_dims, in_channels, out_channels, strides, kernel_size, act=None, adn_ordering="A"): - super().__init__(spatial_dims, in_channels, out_channels, strides, kernel_size, adn_ordering, act) - - def forward(self, input: torch.Tensor, weights=None, indices=None): - _, channel, *dims = input.size() - if weights is not None: - weights, _ = torch.max(weights, dim=0) - weights = weights.view(1, channel, 1, 1) - # use weights to adapt how the classes are weighted. - if len(dims) == 2: - out_conv = F.conv2d(input, weights) - else: - raise ValueError("Quicknat is a 2D architecture, please check your dimension.") - else: - out_conv = super().forward(input) - # no indices to return - return out_conv, None - -# Quicknat specific blocks. All blocks inherit from MONAI blocks but have adaptions to their structure -class ConvConcatDenseBlock(ConvDenseBlock): - """ - This dense block is defined as a sequence of 'Convolution' blocks. It overwrite the '_get_layer' methodto change the ordering of - Every convolutional layer is preceded by a batch-normalization layer and a Rectifier Linear Unit (ReLU) layer. - The first two convolutional layers are followed by a concatenation layer that concatenates - the input feature map with outputs of the current and previous convolutional blocks. 
- Kernel size of two convolutional layers kept small to limit number of paramters. - Appropriate padding is provided so that the size of feature maps before and after convolution remains constant. - The output channels for each convolution layer is set to 64, which acts as a bottle- neck for feature map selectivity. - The input channel size is variable, depending on the number of dense connections. - The third convolutional layer is also preceded by a batch normalization and ReLU, - but has a 1 * 1 kernel size to compress the feature map size to 64. - Args: - in_channles: variable depending on depth of the network - seLayer: Squeeze and Excite block to be included, defaults to None, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, - dropout_layer: Dropout block to be included, defaults to None. - :return: forward passed tensor - """ - - def __init__( - self, - in_channels: int, - se_layer: nn.Module | None = nn.Identity, - dropout_layer: type[nn.Dropout2d] | None = nn.Identity, - kernel_size: Sequence[int] | int = 5, - num_filters: int = 64, - ): - self.count = 0 - super().__init__( - in_channels=in_channels, - spatial_dims=2, - # number of channels stay constant throughout the convolution layers - channels=[num_filters, num_filters, num_filters], - norm=("instance", {"num_features": in_channels}), - kernel_size=kernel_size, - ) - self.se_layer = se_layer - self.dropout_layer = dropout_layer - - def _get_layer(self, in_channels, out_channels, dilation): - """ - After ever convolutional layer the output is concatenated with the input and the layer before. - The concatenated output is used as input to the next convolutional layer. - - Args: - in_channels: number of input channels. - out_channels: number of output channels. - strides: convolution stride. - is_top: True if this is the top block. - """ - kernelsize = self.kernel_size if self.count < 2 else (1, 1) - # padding = None if self.count < 2 else (0, 0) - self.count += 1 - conv = Convolution( - spatial_dims=self.spatial_dims, - in_channels=in_channels, - out_channels=out_channels, - strides=1, - kernel_size=kernelsize, - act=self.act, - norm=("instance", {"num_features": in_channels}), - ) - return nn.Sequential(conv.get_submodule("adn"), conv.get_submodule("conv")) - - def forward(self, input, _): - i = 0 - result = input - for l in self.children(): - # ignoring the max (un-)pool and droupout already added in the initial initialization step - if isinstance(l, (nn.MaxPool2d, nn.MaxUnpool2d, nn.Dropout2d)): - continue - # first convolutional forward - result = l(result) - if i == 0: - result1 = result - # concatenation with the input feature map - result = torch.cat((input, result), dim=1) - - if i == 1: - # concatenation with input feature map and feature map from first convolution - result = torch.cat((result1, result, input), dim=1) - i = i + 1 - - # if SELayer or Dropout layer defined put output through layer before returning, - # else it just goes through nn.Identity and the output does not change - result = self.se_layer(result) - result = self.dropout_layer(result) - - return result, None - - - class Encoder(ConvConcatDenseBlock): - """ - Returns a convolution dense block for the encoding (down) part of a layer of the network. - This Encoder block downpools the data with max_pool. - Its output is used as input to the next layer down. - New feature: it returns the indices of the max_pool to the decoder (up) path - at the same layer to upsample the input. - - Args: - in_channels: number of input channels. 
- max_pool: predefined max_pool layer to downsample the data. - se_layer: Squeeze and Excite block to be included, defaults to None. - dropout: Dropout block to be included, defaults to None. - kernel_size : kernel size of the convolutional layers. Defaults to 5*5 - num_filters : number of input channels to each convolution block. Defaults to 64 - """ - - def __init__(self, in_channels: int, max_pool, se_layer, dropout, kernel_size, num_filters): - super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) - self.max_pool = max_pool - - def forward(self, input, indices=None): - input, indices = self.max_pool(input) - - out_block, _ = super().forward(input, None) - # safe the indices for unpool on decoder side - return out_block, indices - - - class Decoder(ConvConcatDenseBlock): - """ - Returns a convolution dense block for the decoding (up) part of a layer of the network. - This will upsample data with an unpool block before the forward. - It uses the indices from corresponding encoder on it's level. - Its output is used as input to the next layer up. - - Args: - in_channels: number of input channels. - un_pool: predefined unpool block. - se_layer: predefined SELayer. Defaults to None. - dropout: predefined dropout block. Defaults to None. - kernel_size: Kernel size of convolution layers. Defaults to 5*5. - num_filters: number of input channels to each convolution layer. Defaults to 64. - """ - - def __init__(self, in_channels: int, un_pool, se_layer, dropout, kernel_size, num_filters): - super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) - self.un_pool = un_pool - - def forward(self, input, indices): - out_block, _ = super().forward(input, None) - out_block = self.un_pool(out_block, indices) - return out_block, None - - - class Bottleneck(ConvConcatDenseBlock): - """ - Returns the bottom or bottleneck layer at the bottom of a network linking encoder to decoder halves. - It consists of a 5 * 5 convolutional layer and a batch normalization layer to separate - the encoder and decoder part of the network, restricting information flow between the encoder and decoder. - - Args: - in_channels: number of input channels. - se_layer: predefined SELayer. Defaults to None. - dropout: predefined dropout block. Defaults to None. - un_pool: predefined unpool block. - max_pool: predefined maxpool block. - kernel_size: Kernel size of convolution layers. Defaults to 5*5. - num_filters: number of input channels to each convolution layer. Defaults to 64. 
- """ - - def __init__(self, in_channels: int, se_layer, dropout, max_pool, un_pool, kernel_size, num_filters): - super().__init__(in_channels, se_layer, dropout, kernel_size, num_filters) - self.max_pool = max_pool - self.un_pool = un_pool - - def forward(self, input, indices): - out_block, indices = self.max_pool(input) - out_block, _ = super().forward(out_block, None) - out_block = self.un_pool(out_block, indices) - return out_block, None From fcee6f6c8ca11b65aa5897ad8801ea41b36c23cd Mon Sep 17 00:00:00 2001 From: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> Date: Wed, 21 Jun 2023 12:50:00 +0200 Subject: [PATCH 52/68] Skip Quicknat test if squeeze-and-excitation not installed Signed-off-by: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> --- tests/test_quicknat.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_quicknat.py b/tests/test_quicknat.py index 7f6f180597..988ac26a44 100644 --- a/tests/test_quicknat.py +++ b/tests/test_quicknat.py @@ -18,8 +18,11 @@ from monai.networks import eval_mode from monai.networks.nets import Quicknat +from monai.utils import optional_import from tests.utils import test_script_save +_, has_se = optional_import("squeeze_and_excitation") + TEST_CASES = [ # params, input_shape, expected_shape [{"num_classes": 1, "num_channels": 1, "num_filters": 1, "se_block": None}, (1, 1, 32, 32), (1, 1, 32, 32)], @@ -32,7 +35,7 @@ [{"num_classes": 1, "num_channels": 1, "num_filters": 64, "se_block": "CSSE"}, (1, 1, 32, 32), (1, 1, 32, 32)], ] - +@unittest.skipUnless(has_se, "squeeze_and_excitation not installed") class TestQuicknat(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_shape, expected_shape): From 32973d6d2529abb1569de6fcc2fc928a7ec75ad8 Mon Sep 17 00:00:00 2001 From: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> Date: Wed, 21 Jun 2023 13:16:35 +0200 Subject: [PATCH 53/68] Skip daf3d test if torchvision not installed Signed-off-by: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> --- tests/test_daf3d.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_daf3d.py b/tests/test_daf3d.py index ccf629801f..9afcd47092 100644 --- a/tests/test_daf3d.py +++ b/tests/test_daf3d.py @@ -18,8 +18,11 @@ from monai.networks import eval_mode from monai.networks.nets import DAF3D +from monai.utils import optional_import from tests.utils import test_script_save +_, has_tv = optional_import("torchvision") + TEST_CASES = [ [{"in_channels": 1, "out_channels": 1}, (1, 1, 32, 32, 64), (1, 1, 32, 32, 64)], # single channel 3D, batch 1 [{"in_channels": 2, "out_channels": 1}, (3, 2, 32, 64, 128), (3, 1, 32, 64, 128)], # two channel 3D, batch 3 @@ -36,7 +39,7 @@ ], # four channel 3D, same in & out channels ] - +@unittest.SkipUnless(has_tv, "torchvision not installed") class TestDAF3D(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_shape, expected_shape): From 15caa82df912ced275c0cba40d9c5425f88ee8da Mon Sep 17 00:00:00 2001 From: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> Date: Wed, 21 Jun 2023 13:18:34 +0200 Subject: [PATCH 54/68] Update test_daf3d.py typo Signed-off-by: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> --- tests/test_daf3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_daf3d.py b/tests/test_daf3d.py index 9afcd47092..ca6dea152f 100644 --- a/tests/test_daf3d.py +++ b/tests/test_daf3d.py @@ -39,7 +39,7 @@ ], # four channel 3D, same
in & out channels ] -@unittest.SkipUnless(has_tv, "torchvision not installed") +@unittest.skipUnless(has_tv, "torchvision not installed") class TestDAF3D(unittest.TestCase): @parameterized.expand(TEST_CASES) def test_shape(self, input_param, input_shape, expected_shape): From bafce625cee3f6fa1fa93b487ed8d1f851e8bdbc Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:14:15 +0200 Subject: [PATCH 55/68] type check for default se_layer and dropout layer in quicknat.py line 126 Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 3dfee964f3..9b117c130b 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -123,8 +123,8 @@ class ConvConcatDenseBlock(ConvDenseBlock): def __init__( self, in_channels: int, - se_layer: nn.Module | None = nn.Identity, - dropout_layer: type[nn.Dropout2d] | None = nn.Identity, + se_layer: nn.Module = nn.Identity, + dropout_layer: type[nn.Dropout2d] = nn.Identity, kernel_size: Sequence[int] | int = 5, num_filters: int = 64, ): From 6b01218087984210be1444d4535f9f3ad5ed05df Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Thu, 22 Jun 2023 14:40:58 +0200 Subject: [PATCH 56/68] codeformat --- tests/test_daf3d.py | 1 + tests/test_quicknat.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/test_daf3d.py b/tests/test_daf3d.py index ca6dea152f..3f8868e24b 100644 --- a/tests/test_daf3d.py +++ b/tests/test_daf3d.py @@ -39,6 +39,7 @@ ], # four channel 3D, same in & out channels ] + @unittest.skipUnless(has_tv, "torchvision not installed") class TestDAF3D(unittest.TestCase): @parameterized.expand(TEST_CASES) diff --git a/tests/test_quicknat.py b/tests/test_quicknat.py index 988ac26a44..b4b89b7d62 100644 --- a/tests/test_quicknat.py +++ b/tests/test_quicknat.py @@ -35,6 +35,7 @@ [{"num_classes": 1, "num_channels": 1, "num_filters": 64, "se_block": "CSSE"}, (1, 1, 32, 32), (1, 1, 32, 32)], ] + @unittest.skipUnless(has_se, "squeeze_and_excitation not installed") class TestQuicknat(unittest.TestCase): @parameterized.expand(TEST_CASES) From 5c259b5a5549751e817bdf1b029570b1de30aaea Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Thu, 22 Jun 2023 15:16:28 +0200 Subject: [PATCH 57/68] deleted enable_test_dropout() in quicknat.py Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 9b117c130b..8b28286dc5 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -310,6 +310,9 @@ class Quicknat(nn.Module): input (data from the previous layer concatenated with data from the skip connection) in the first convolution. this ensures the final output of the network has the same shape as the input. + The original QuickNAT implementation included an `enable_test_dropout()` mechanism for uncertainty estimation during + testing. As the dropout layers are the only stochastic components of this network, calling the train() method instead of eval() in testing or inference has the same effect. + Args: num_classes: number of classes to segmentate (output channels). num_channels: number of input channels.
@@ -423,17 +426,6 @@ def get_selayer(self, n_filters, se_block_type="None"): else: return None - # TODO: Do I include this: - def enable_test_dropout(self): - """ - Enables test time drop out for uncertainity - :return: - """ - attr_dict = self.__dict__["_modules"] - for i in range(1, 5): - encode_block, decode_block = (attr_dict["encode" + str(i)], attr_dict["decode" + str(i)]) - encode_block.drop_out = encode_block.drop_out.apply(nn.Module.train) - decode_block.drop_out = decode_block.drop_out.apply(nn.Module.train) @property def is_cuda(self): From 20e29dff9ab2627bff512f9e05ab54c0cca8f6a6 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Thu, 22 Jun 2023 15:43:31 +0200 Subject: [PATCH 58/68] codeformat --- monai/networks/nets/quicknat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 8b28286dc5..53677e444d 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -311,7 +311,8 @@ class Quicknat(nn.Module): ensures the final output of the network has the same shape as the input. The original QuickNAT implementation included an `enable_test_dropout()` mechanism for uncertainty estimation during - testing. As the dropout layers are the only stochastic components of this network, calling the train() method instead of eval() in testing or inference has the same effect. + testing. As the dropout layers are the only stochastic components of this network, calling the train() method instead + of eval() in testing or inference has the same effect. Args: num_classes: number of classes to segmentate (output channels). @@ -426,7 +427,6 @@ def get_selayer(self, n_filters, se_block_type="None"): else: return None - @property def is_cuda(self): """ From 6f298963a9ceb02e4c6314dfa57828cbfe261459 Mon Sep 17 00:00:00 2001 From: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> Date: Sat, 24 Jun 2023 09:31:06 +0200 Subject: [PATCH 59/68] fixed mypy issues in daf3d Signed-off-by: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> --- monai/networks/nets/daf3d.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index fb3de4b6c9..4fa0abe603 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -197,7 +197,7 @@ def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None) self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False) # adapt activation function - self.relu = nn.PReLU() + self.relu = nn.PReLU() # type: ignore class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck): @@ -288,12 +288,12 @@ def __init__( n_input_channels, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False ) self.bn1 = norm_type(32, 64) - self.relu = nn.PReLU() + self.relu = nn.PReLU() # type: ignore # adapt layers to our needs self.layer1 = self._make_layer(Daf3dResNetBottleneck, block_inplanes[0], layers[0], spatial_dims, shortcut_type) self.layer2 = self._make_layer( - Daf3dResNetBottleneck, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=(1, 2, 2) + Daf3dResNetBottleneck, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=(1, 2, 2) # type: ignore ) self.layer3 = self._make_layer( Daf3dResNetDilatedBottleneck, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=1 @@ -528,7 +528,7 @@ def __init__(self, in_channels, out_channels, visual_output=False):
conv_out_channels=64, out_channels=64, kernel_sizes=(3, 3, 3, 3), - dilations=((1, 1, 1), (1, 6, 6), (1, 12, 12), (1, 18, 18)), + dilations=((1, 1, 1), (1, 6, 6), (1, 12, 12), (1, 18, 18)), # type: ignore norm_type=group_norm, acti_type=None, bias=True, @@ -568,7 +568,7 @@ def forward(self, x): F.interpolate(o, size=x.size()[2:], mode="trilinear") for o in supervised1 + supervised2 + supervised3 ] - output = supervised_final, supervised_inner + output = supervised_final + supervised_inner else: output = F.interpolate(supervised_final, size=x.size()[2:], mode="trilinear") return output From f007315511193f7e4fd3c4dcc4704cb0d2189729 Mon Sep 17 00:00:00 2001 From: Alexandra Marquardt Date: Sat, 24 Jun 2023 09:45:04 +0200 Subject: [PATCH 60/68] codeformat and mypy in daf3d Signed-off-by: Alexandra Marquardt --- monai/networks/nets/daf3d.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index 4fa0abe603..bf70820d14 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -197,7 +197,7 @@ def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None) self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False) # adapt activation function - self.relu = nn.PReLU() # type: ignore + self.relu = nn.PReLU() # type: ignore class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck): @@ -288,12 +288,12 @@ def __init__( n_input_channels, self.in_planes, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False ) self.bn1 = norm_type(32, 64) - self.relu = nn.PReLU() # type: ignore + self.relu = nn.PReLU() # type: ignore # adapt layers to our needs self.layer1 = self._make_layer(Daf3dResNetBottleneck, block_inplanes[0], layers[0], spatial_dims, shortcut_type) self.layer2 = self._make_layer( - Daf3dResNetBottleneck, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=(1, 2, 2) # type: ignore + Daf3dResNetBottleneck, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=(1, 2, 2) # type: ignore ) self.layer3 = self._make_layer( Daf3dResNetDilatedBottleneck, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=1 @@ -528,7 +528,7 @@ def __init__(self, in_channels, out_channels, visual_output=False): conv_out_channels=64, out_channels=64, kernel_sizes=(3, 3, 3, 3), - dilations=((1, 1, 1), (1, 6, 6), (1, 12, 12), (1, 18, 18)), # type: ignore + dilations=((1, 1, 1), (1, 6, 6), (1, 12, 12), (1, 18, 18)), # type: ignore norm_type=group_norm, acti_type=None, bias=True, From cebf558f6cb728f4f347f2d7ce8c99aaff03bbf6 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Sat, 24 Jun 2023 10:17:42 +0200 Subject: [PATCH 61/68] workaround for type check Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 53677e444d..63a5dbd64c 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -123,8 +123,8 @@ class ConvConcatDenseBlock(ConvDenseBlock): def __init__( self, in_channels: int, - se_layer: nn.Module = nn.Identity, - dropout_layer: type[nn.Dropout2d] = nn.Identity, + se_layer: Optional[nn.Module] = None, + dropout_layer: Optional[type[nn.Dropout2d]] = None, kernel_size: Sequence[int] | int = 5, num_filters: int = 64, ): @@ -137,6 +137,12 @@ def __init__( 
norm=("instance", {"num_features": in_channels}), kernel_size=kernel_size, ) + if se_layer is None: + se_layer = nn.Identity() + + if dropout_layer is None: + dropout_layer = nn.Identity() + self.se_layer = se_layer self.dropout_layer = dropout_layer From bd8e9970f385a7bd5d7414c2b2d893bfed74b7f4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 24 Jun 2023 08:18:06 +0000 Subject: [PATCH 62/68] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- monai/networks/nets/quicknat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 63a5dbd64c..cc762465b0 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -139,10 +139,10 @@ def __init__( ) if se_layer is None: se_layer = nn.Identity() - + if dropout_layer is None: dropout_layer = nn.Identity() - + self.se_layer = se_layer self.dropout_layer = dropout_layer From d75bd8d20e91de5810fb1883d1e314e5184719ac Mon Sep 17 00:00:00 2001 From: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> Date: Sat, 24 Jun 2023 12:11:13 +0200 Subject: [PATCH 63/68] Update monai/networks/nets/daf3d.py Co-authored-by: Wenqi Li <831580+wyli@users.noreply.github.com> Signed-off-by: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> --- monai/networks/nets/daf3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index bf70820d14..87686e181a 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -544,7 +544,7 @@ def forward(self, x): mlf = self.fuse(torch.cat(single_layer_features, 1)) attentive_features_maps = [self.attention(slf, mlf) for slf in single_layer_features] - att_features, att_maps = zip(*attentive_features_maps) + att_features, att_maps = tuple(zip(*attentive_features_maps)) # second 4 supervised signals (af 1 - 4) supervised2 = [self.predict2(af) for af in att_features] From a0daad13dc45317b0aa63181017d34aa9e6dc268 Mon Sep 17 00:00:00 2001 From: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> Date: Sat, 24 Jun 2023 12:15:19 +0200 Subject: [PATCH 64/68] Added import of Optional Signed-off-by: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index cc762465b0..f6d1309c5b 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -11,7 +11,7 @@ from __future__ import annotations -from typing import Sequence, Tuple, Union +from typing import Optional, Sequence, Tuple, Union import torch import torch.nn as nn From fd3196b19f8642d45dd76b6c8f74ed2ce37984dc Mon Sep 17 00:00:00 2001 From: Wenqi Li <831580+wyli@users.noreply.github.com> Date: Sat, 24 Jun 2023 13:20:59 +0100 Subject: [PATCH 65/68] Update tests/test_daf3d.py Signed-off-by: Wenqi Li <831580+wyli@users.noreply.github.com> --- tests/test_daf3d.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_daf3d.py b/tests/test_daf3d.py index 3f8868e24b..34e25cc6be 100644 --- a/tests/test_daf3d.py +++ b/tests/test_daf3d.py @@ -51,6 +51,7 @@ def test_shape(self, input_param, input_shape, expected_shape): result = net(torch.randn(input_shape).to(device)) self.assertEqual(result.shape, expected_shape) + @unittest.skip("daf3d: torchscript not currently 
supported") def test_script(self): net = DAF3D(in_channels=1, out_channels=1) test_data = torch.randn(16, 1, 32, 32) From 999cf9529899cd12ea9cf53f764fbb20505ea21c Mon Sep 17 00:00:00 2001 From: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> Date: Sat, 24 Jun 2023 15:01:18 +0200 Subject: [PATCH 66/68] Update daf3d.py Signed-off-by: Al3xand1a <98582325+Al3xand1a@users.noreply.github.com> --- monai/networks/nets/daf3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/daf3d.py b/monai/networks/nets/daf3d.py index 87686e181a..5a83cdc600 100644 --- a/monai/networks/nets/daf3d.py +++ b/monai/networks/nets/daf3d.py @@ -568,7 +568,7 @@ def forward(self, x): F.interpolate(o, size=x.size()[2:], mode="trilinear") for o in supervised1 + supervised2 + supervised3 ] - output = supervised_final + supervised_inner + output = [supervised_final] + supervised_inner else: output = F.interpolate(supervised_final, size=x.size()[2:], mode="trilinear") return output From 83dafd9bdc2981f79062b44feb51c77f01f9e8f9 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Sat, 24 Jun 2023 15:12:45 +0200 Subject: [PATCH 67/68] changed the default assignment for se_layer and dropout2d Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index f6d1309c5b..28b8cba10d 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -137,14 +137,8 @@ def __init__( norm=("instance", {"num_features": in_channels}), kernel_size=kernel_size, ) - if se_layer is None: - se_layer = nn.Identity() - - if dropout_layer is None: - dropout_layer = nn.Identity() - - self.se_layer = se_layer - self.dropout_layer = dropout_layer + self.se_layer = se_layer if se_layer is not None else nn.Identity() + self.dropout_layer = dropout_layer if dropout_layer is not None else nn.Identity() def _get_layer(self, in_channels, out_channels, dilation): """ From 0b8fd2d6bba2468b9c507c9e7bf63ae7506c8135 Mon Sep 17 00:00:00 2001 From: ge96lip <73938628+ge96lip@users.noreply.github.com> Date: Sat, 24 Jun 2023 15:15:04 +0200 Subject: [PATCH 68/68] cleaned up code quicknat.py Signed-off-by: ge96lip <73938628+ge96lip@users.noreply.github.com> --- monai/networks/nets/quicknat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/networks/nets/quicknat.py b/monai/networks/nets/quicknat.py index 28b8cba10d..cbcccf24d7 100644 --- a/monai/networks/nets/quicknat.py +++ b/monai/networks/nets/quicknat.py @@ -124,7 +124,7 @@ def __init__( self, in_channels: int, se_layer: Optional[nn.Module] = None, - dropout_layer: Optional[type[nn.Dropout2d]] = None, + dropout_layer: Optional[nn.Dropout2d] = None, kernel_size: Sequence[int] | int = 5, num_filters: int = 64, ):
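A closing usage sketch tying the series together: since patch 57 removed enable_test_dropout(), Monte Carlo dropout for uncertainty estimation is obtained by simply leaving the model in train() mode, the dropout layers being its only stochastic parts. The constructor arguments below mirror the first TEST_CASES entry in test_quicknat.py; the sampling loop is an assumed usage pattern rather than code from these patches, and it only produces varying samples if the network was built with a nonzero dropout rate.

import torch
from monai.networks.nets import Quicknat

net = Quicknat(num_classes=1, num_channels=1, num_filters=1, se_block=None)
net.train()  # keep the Dropout2d layers stochastic at inference time
x = torch.randn(1, 1, 32, 32)
with torch.no_grad():
    samples = torch.stack([net(x) for _ in range(8)])  # eight stochastic forward passes
mean = samples.mean(dim=0)  # segmentation estimate
var = samples.var(dim=0)  # per-pixel uncertainty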