Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

6676 port generative networks spade #7320

Merged
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions docs/source/networks.rst
Original file line number Diff line number Diff line change
Expand Up @@ -248,6 +248,7 @@ Blocks
.. autoclass:: monai.apps.reconstruction.networks.blocks.varnetblock.VarNetBlock
:members:


N-Dim Fourier Transform
~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: monai.networks.blocks.fft_utils_t
Expand All @@ -258,6 +259,10 @@ N-Dim Fourier Transform
.. autofunction:: monai.networks.blocks.fft_utils_t.fftshift
.. autofunction:: monai.networks.blocks.fft_utils_t.ifftshift

`SPADE`
~~~~~~~
.. autoclass:: monai.networks.blocks.spade_norm.SPADE
:members:

Layers
------
Expand Down Expand Up @@ -588,6 +593,11 @@ Nets
.. autoclass:: DiffusionModelUNet
:members:

`SPADEDiffusionModelUNet`
~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: SPADEDiffusionModelUNet
:members:

`ControlNet`
~~~~~~~~~~~~
.. autoclass:: ControlNet
Expand Down Expand Up @@ -618,6 +628,11 @@ Nets
.. autoclass:: AutoencoderKL
:members:

`SPADEAutoencoderKL`
~~~~~~~~~~~~~~~~~~~~
.. autoclass:: SPADEAutoencoderKL
:members:

`VarAutoEncoder`
~~~~~~~~~~~~~~~~
.. autoclass:: VarAutoEncoder
Expand Down
1 change: 1 addition & 0 deletions monai/networks/blocks/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
from .regunet_block import RegistrationDownSampleBlock, RegistrationExtractionBlock, RegistrationResidualConvBlock
from .segresnet_block import ResBlock
from .selfattention import SABlock
from .spade_norm import SPADE
from .squeeze_and_excitation import (
ChannelSELayer,
ResidualSELayer,
Expand Down
97 changes: 97 additions & 0 deletions monai/networks/blocks/spade_norm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import torch
import torch.nn as nn
import torch.nn.functional as F

from monai.networks.blocks import ADN, Convolution


class SPADE(nn.Module):
    """
    Spatially-Adaptive (De)normalisation block, following the 2019 paper by
    Park et al. (doi: https://doi.org/10.48550/arXiv.1903.07291).

    The input is first passed through a parameter-free base normalisation, and the
    result is then modulated by a per-pixel scale (gamma) and shift (beta) that are
    predicted from a semantic segmentation map.

    Args:
        label_nc: number of semantic labels
        norm_nc: number of output channels
        kernel_size: kernel size
        spatial_dims: number of spatial dimensions
        hidden_channels: number of channels in the intermediate gamma and beta layers
        norm: type of base normalisation used before applying the SPADE normalisation
        norm_params: parameters for the base normalisation
    """

    def __init__(
        self,
        label_nc: int,
        norm_nc: int,
        kernel_size: int = 3,
        spatial_dims: int = 2,
        hidden_channels: int = 64,
        norm: str | tuple = "INSTANCE",
        norm_params: dict | None = None,
    ) -> None:
        super().__init__()

        # ADN accepts either a bare norm name or a (name, kwargs) pair; only wrap
        # the name when extra parameters were actually supplied.
        base_norm: str | tuple = (norm, norm_params) if norm_params else norm
        self.param_free_norm = ADN(
            act=None, dropout=0.0, norm=base_norm, norm_dim=spatial_dims, ordering="N", in_channels=norm_nc
        )

        # "Same" padding so the spatial size of the modulation maps matches the input.
        padding = kernel_size // 2

        # Shared trunk that embeds the segmentation map before the gamma/beta heads.
        self.mlp_shared = Convolution(
            spatial_dims=spatial_dims,
            in_channels=label_nc,
            out_channels=hidden_channels,
            kernel_size=kernel_size,
            norm=None,
            padding=padding,
            act="LEAKYRELU",
        )
        # Per-pixel scale head (no activation: gamma may be any real value).
        self.mlp_gamma = Convolution(
            spatial_dims=spatial_dims,
            in_channels=hidden_channels,
            out_channels=norm_nc,
            kernel_size=kernel_size,
            padding=padding,
            act=None,
        )
        # Per-pixel shift head (no activation: beta may be any real value).
        self.mlp_beta = Convolution(
            spatial_dims=spatial_dims,
            in_channels=hidden_channels,
            out_channels=norm_nc,
            kernel_size=kernel_size,
            padding=padding,
            act=None,
        )

    def forward(self, x: torch.Tensor, segmap: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: input tensor
            segmap: input segmentation map (bxcx[spatial-dimensions]) where c is the number of semantic channels.
                The map will be interpolated to the dimension of x internally.
        """
        # Parameter-free normalisation of the activations.
        normalized = self.param_free_norm(x)

        # Resize the segmentation map to x's spatial size, then predict the
        # per-pixel modulation parameters from it.
        resized_segmap = F.interpolate(segmap, size=x.size()[2:], mode="nearest")
        shared = self.mlp_shared(resized_segmap)
        gamma = self.mlp_gamma(shared)
        beta = self.mlp_beta(shared)

        out: torch.Tensor = normalized * (1 + gamma) + beta
        return out
2 changes: 2 additions & 0 deletions monai/networks/nets/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,8 @@
seresnext50,
seresnext101,
)
from .spade_autoencoderkl import SPADEAutoencoderKL
from .spade_diffusion_model_unet import SPADEDiffusionModelUNet
from .swin_unetr import PatchMerging, PatchMergingV2, SwinUNETR
from .torchvision_fc import TorchVisionFCModel
from .transchex import BertAttention, BertMixedLayer, BertOutput, BertPreTrainedModel, MultiModal, Pooler, Transchex
Expand Down
Loading
Loading