diff --git a/test/test_datasets.py b/test/test_datasets.py
index e16f2a1609a..e479667c32b 100644
--- a/test/test_datasets.py
+++ b/test/test_datasets.py
@@ -617,7 +617,6 @@ class VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase):
             year=[f"20{year:02d}" for year in range(7, 13)], image_set=("train", "val", "trainval")
         ),
         dict(year="2007", image_set="test"),
-        dict(year="2007-test", image_set="test"),
     )
 
     def inject_fake_data(self, tmpdir, config):
diff --git a/test/test_models.py b/test/test_models.py
index e9eeed7c196..4ca9a5bbf1d 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -244,7 +244,7 @@ def _check_input_backprop(model, inputs):
 # The following contains configuration parameters for all models which are used by
 # the _test_*_model methods.
 _model_params = {
-    "inception_v3": {"input_shape": (1, 3, 299, 299)},
+    "inception_v3": {"input_shape": (1, 3, 299, 299), "init_weights": True},
     "retinanet_resnet50_fpn": {
         "num_classes": 20,
         "score_thresh": 0.01,
@@ -318,6 +318,7 @@ def _check_input_backprop(model, inputs):
     "s3d": {
         "input_shape": (1, 3, 16, 224, 224),
     },
+    "googlenet": {"init_weights": True},
 }
 # speeding up slow models:
 slow_models = [
diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py
index a75beb131b7..abf2184acec 100644
--- a/torchvision/models/quantization/googlenet.py
+++ b/torchvision/models/quantization/googlenet.py
@@ -39,7 +39,7 @@ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
 
 class QuantizableInception(Inception):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.cat = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -50,7 +50,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionAux(InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.relu = nn.ReLU()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -75,7 +75,7 @@ class QuantizableGoogLeNet(GoogLeNet):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(  # type: ignore[misc]
-            blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], *args, **kwargs
+            *args, blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], **kwargs
         )
         self.quant = torch.ao.quantization.QuantStub()
         self.dequant = torch.ao.quantization.DeQuantStub()
diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py
index 5af73c80fa0..34cd2a0a36a 100644
--- a/torchvision/models/quantization/inception.py
+++ b/torchvision/models/quantization/inception.py
@@ -41,7 +41,7 @@ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
 class QuantizableInceptionA(inception_module.InceptionA):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -52,7 +52,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionB(inception_module.InceptionB):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -63,7 +63,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionC(inception_module.InceptionC):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -74,7 +74,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionD(inception_module.InceptionD):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -85,7 +85,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionE(inception_module.InceptionE):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop1 = nn.quantized.FloatFunctional()
         self.myop2 = nn.quantized.FloatFunctional()
         self.myop3 = nn.quantized.FloatFunctional()
@@ -119,20 +119,13 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionAux(inception_module.InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
 
 
 class QuantizableInception3(inception_module.Inception3):
-    def __init__(
-        self,
-        num_classes: int = 1000,
-        aux_logits: bool = True,
-        transform_input: bool = False,
-    ) -> None:
-        super().__init__(
-            num_classes=num_classes,
-            aux_logits=aux_logits,
-            transform_input=transform_input,
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(  # type: ignore[misc]
+            *args,
             inception_blocks=[
                 QuantizableBasicConv2d,
                 QuantizableInceptionA,
@@ -142,6 +135,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
                 QuantizableInceptionE,
                 QuantizableInceptionAux,
             ],
+            **kwargs,
         )
         self.quant = torch.ao.quantization.QuantStub()
         self.dequant = torch.ao.quantization.DeQuantStub()
diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py
index 986f67c6080..53229c09534 100644
--- a/torchvision/models/quantization/mobilenetv3.py
+++ b/torchvision/models/quantization/mobilenetv3.py
@@ -83,7 +83,7 @@ def _load_from_state_dict(
 class QuantizableInvertedResidual(InvertedResidual):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(se_layer=QuantizableSqueezeExcitation, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, se_layer=QuantizableSqueezeExcitation, **kwargs)  # type: ignore[misc]
         self.skip_add = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
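Note on the recurring change above: a call like f(keyword=value, *args, **kwargs) is legal Python, but it is misleading to read, since positional arguments from *args are always bound to parameters before keywords regardless of where the keyword appears in the call. Placing *args first states the actual binding order. The QuantizableInception3 rewrite additionally switches from a fixed parameter list to *args/**kwargs forwarding, so extra keyword arguments such as init_weights reach the parent class, which is what the new _model_params entries in test_models.py exercise. Below is a minimal runnable sketch of that pattern; Base, Block, and Quantizable are hypothetical stand-ins, not the torchvision API:

from typing import Any, Optional


class Base:
    def __init__(self, num_classes: int = 1000, conv_block: Optional[type] = None, init_weights: bool = False) -> None:
        self.num_classes = num_classes
        self.conv_block = conv_block
        self.init_weights = init_weights


class Block:
    pass


class Quantizable(Base):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pin conv_block as a keyword after *args; any extra kwargs
        # (e.g. init_weights) are forwarded to Base unchanged.
        super().__init__(*args, conv_block=Block, **kwargs)


m = Quantizable(10, init_weights=True)
assert m.num_classes == 10 and m.conv_block is Block and m.init_weights

Either argument order raises the same TypeError if a caller also supplies conv_block, so the change is about readability and consistent forwarding rather than new runtime behavior.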