Replace deprecated arg in torchvision models (#6401)
### Description

For torchvision models, the `pretrained` parameter has been deprecated since
torchvision 0.13 and may be removed in the future, and our `TorchVisionFC`
model has been using it. This PR updates `TorchVisionFC` to stop passing
`pretrained` and to pass `weights` with appropriate defaults instead, with no
change in functionality (backward compatible).
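
For context, the torchvision side of the mapping looks like this; a minimal sketch against torchvision's public API (not part of this PR's diff):

```python
# Minimal sketch of the deprecated vs. current torchvision API.
from torchvision import models

# Deprecated since torchvision 0.13: warns and maps True to the default weights.
old_style = models.resnet18(pretrained=True)

# Current API: pass a weights enum, the string "DEFAULT", or None for random init.
new_style = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
random_init = models.resnet18(weights=None)  # equivalent to pretrained=False
```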

### Types of changes
- [x] Non-breaking change (fix or new feature that would not break
existing functionality).
- [x] Integration tests passed locally by running `./runtests.sh -f -u
--net --coverage`.
- [x] Quick tests passed locally by running `./runtests.sh --quick
--unittests --disttests`.

---------

Signed-off-by: Behrooz <3968947+drbeh@users.noreply.github.com>
Signed-off-by: Wenqi Li <wenqil@nvidia.com>
Co-authored-by: Nic Ma <nma@nvidia.com>
Co-authored-by: Wenqi Li <wenqil@nvidia.com>
3 people authored Apr 20, 2023
1 parent 7157832 commit 05dbc86
Showing 2 changed files with 15 additions and 1 deletion.
4 changes: 3 additions & 1 deletion monai/networks/nets/torchvision_fc.py
```diff
@@ -114,8 +114,10 @@ def __init__(
     ):
         if weights is not None:
             model = getattr(models, model_name)(weights=weights, **kwargs)
+        elif pretrained:
+            model = getattr(models, model_name)(weights="DEFAULT", **kwargs)
         else:
-            model = getattr(models, model_name)(pretrained=pretrained, **kwargs)  # 'pretrained' deprecated 0.13
+            model = getattr(models, model_name)(weights=None, **kwargs)
 
         super().__init__(
             model=model,
```
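
The result for callers, as a brief usage sketch; the `model_name`/`num_classes` keywords are assumed from the existing `TorchVisionFC` signature, since only the `pretrained`/`weights` handling appears in this diff:

```python
# Backward-compatibility sketch for TorchVisionFC (assumed keywords noted above).
from monai.networks.nets import TorchVisionFC

# Old-style flag still works: pretrained=True now resolves to weights="DEFAULT".
net_a = TorchVisionFC(model_name="resnet18", num_classes=2, pretrained=True)

# New-style: pass a torchvision weights spec directly; it takes precedence
# over pretrained because the `weights is not None` branch is checked first.
net_b = TorchVisionFC(model_name="resnet18", num_classes=2, weights="DEFAULT")

# Neither given: random initialization via weights=None.
net_c = TorchVisionFC(model_name="resnet18", num_classes=2)
```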
12 changes: 12 additions & 0 deletions tests/test_dynunet.py
```diff
@@ -19,8 +19,11 @@
 
 from monai.networks import eval_mode
 from monai.networks.nets import DynUNet
+from monai.utils import optional_import
 from tests.utils import assert_allclose, skip_if_no_cuda, skip_if_windows, test_script_save
 
+InstanceNorm3dNVFuser, _ = optional_import("apex.normalization", name="InstanceNorm3dNVFuser")
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 strides: Sequence[Sequence[int] | int]
```
```diff
@@ -125,6 +128,15 @@ def test_script(self):
 @skip_if_no_cuda
 @skip_if_windows
 class TestDynUNetWithInstanceNorm3dNVFuser(unittest.TestCase):
+    def setUp(self):
+        try:
+            layer = InstanceNorm3dNVFuser(num_features=1, affine=False).to("cuda:0")
+            inp = torch.randn([1, 1, 1, 1, 1]).to("cuda:0")
+            out = layer(inp)
+            del inp, out, layer
+        except Exception:
+            self.skipTest("NVFuser not available")
+
     @parameterized.expand([TEST_CASE_DYNUNET_3D[0]])
     def test_consistency(self, input_param, input_shape, _):
         for eps in [1e-4, 1e-5]:
```
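
The test guards NVFuser at two levels: `optional_import` keeps the module importable when apex is absent, and `setUp` probes a tiny forward pass because constructing the layer can succeed while the fused CUDA kernel still fails at run time. A standalone sketch of the pattern (not part of this diff):

```python
# Sketch of the optional_import guard pattern used above: optional_import
# returns (attribute, available_flag) and never raises at import time.
import torch

from monai.utils import optional_import

InstanceNorm3dNVFuser, has_nvfuser = optional_import("apex.normalization", name="InstanceNorm3dNVFuser")

if has_nvfuser and torch.cuda.is_available():
    try:
        # Run-time probe: mirrors the setUp above, since import success alone
        # does not guarantee the fused kernel works on this GPU/driver.
        layer = InstanceNorm3dNVFuser(num_features=1, affine=False).to("cuda:0")
        out = layer(torch.randn([1, 1, 1, 1, 1], device="cuda:0"))
        print("NVFuser instance norm usable:", tuple(out.shape))
    except Exception as exc:
        print("NVFuser unavailable at run time:", exc)
else:
    print("apex.normalization.InstanceNorm3dNVFuser not installed or no CUDA")
```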
