From cae969215a5e1a82323577f4dd6028b62cc0854c Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 08:41:03 +0200
Subject: [PATCH 01/10] ADD: init_weights config for googlenet

---
 test/test_models.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/test_models.py b/test/test_models.py
index 29b57c60cca..73fa9797498 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -316,6 +316,10 @@ def _check_input_backprop(model, inputs):
     "s3d": {
         "input_shape": (1, 3, 16, 224, 224),
     },
+    "googlenet": {
+        "num_classes": 50,
+        "init_weights": False,
+    }
 }
 # speeding up slow models:
 slow_models = [
@@ -617,7 +621,7 @@ def test_vitc_models(model_fn, dev):
     test_classification_model(model_fn, dev)
 
 
-@pytest.mark.parametrize("model_fn", list_model_fns(models))
+@pytest.mark.parametrize("model_fn", [get_model_builder("googlenet")])
 @pytest.mark.parametrize("dev", cpu_and_gpu())
 def test_classification_model(model_fn, dev):
     set_rng_seed(0)

From d70f10c0acd922ae358ad6f55286629fa488a38d Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 09:40:21 +0200
Subject: [PATCH 02/10] Fix: Inception and googlenet warnings

---
 test/test_models.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/test/test_models.py b/test/test_models.py
index 73fa9797498..b5e2219f371 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -242,7 +242,10 @@ def _check_input_backprop(model, inputs):
 # The following contains configuration parameters for all models which are used by
 # the _test_*_model methods.
 _model_params = {
-    "inception_v3": {"input_shape": (1, 3, 299, 299)},
+    "inception_v3": {
+        "input_shape": (1, 3, 299, 299),
+        "init_weights": True
+    },
     "retinanet_resnet50_fpn": {
         "num_classes": 20,
         "score_thresh": 0.01,
@@ -318,8 +321,8 @@ def _check_input_backprop(model, inputs):
     },
     "googlenet": {
         "num_classes": 50,
-        "init_weights": False,
-    }
+        "init_weights": True,
+    },
 }
 # speeding up slow models:
 slow_models = [
@@ -621,7 +624,7 @@ def test_vitc_models(model_fn, dev):
     test_classification_model(model_fn, dev)
 
 
-@pytest.mark.parametrize("model_fn", [get_model_builder("googlenet")])
+@pytest.mark.parametrize("model_fn", list_model_fns(models))
 @pytest.mark.parametrize("dev", cpu_and_gpu())
 def test_classification_model(model_fn, dev):
     set_rng_seed(0)

From 3f5176f2e44d4b535e71e7d5ac2aa3a12aa35777 Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 09:40:54 +0200
Subject: [PATCH 03/10] Fix: warning in test_datasets.py

---
 test/test_datasets.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/test_datasets.py b/test/test_datasets.py
index e16f2a1609a..e479667c32b 100644
--- a/test/test_datasets.py
+++ b/test/test_datasets.py
@@ -617,7 +617,6 @@ class VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase):
             year=[f"20{year:02d}" for year in range(7, 13)], image_set=("train", "val", "trainval")
         ),
         dict(year="2007", image_set="test"),
-        dict(year="2007-test", image_set="test"),
     )
 
     def inject_fake_data(self, tmpdir, config):

From 926a52274f33930bf980c9d2565fb9b67a86d731 Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 09:55:53 +0200
Subject: [PATCH 04/10] Fix: Formatting error with ufmt

---
 test/test_models.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/test/test_models.py b/test/test_models.py
index b5e2219f371..bec5b5c1ee9 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -242,10 +242,7 @@ def _check_input_backprop(model, inputs):
 # The following contains configuration parameters for all models which are used by
 # the _test_*_model methods.
 _model_params = {
-    "inception_v3": {
-        "input_shape": (1, 3, 299, 299),
-        "init_weights": True
-    },
+    "inception_v3": {"input_shape": (1, 3, 299, 299), "init_weights": True},
     "retinanet_resnet50_fpn": {
         "num_classes": 20,
         "score_thresh": 0.01,
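
Note: the init_weights entries added above exist because GoogLeNet and Inception3
emit a warning when the flag is left at its default of None, announcing a future
change to their default weight initialization; setting the flag explicitly keeps
the test output clean. A minimal sketch of the behavior (the num_classes value is
arbitrary):

    import warnings

    from torchvision.models import googlenet

    # Default (init_weights=None): the constructor warns about the upcoming
    # change in weight initialization.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        googlenet(num_classes=50)
    print([str(w.message) for w in caught])

    # Passing the flag explicitly silences the warning.
    model = googlenet(num_classes=50, init_weights=True)
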
From 00ef56de6d82a3790743cce74bc7634ec5f4b748 Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 11:34:13 +0200
Subject: [PATCH 05/10] Fix: Failing tests in quantized_classification_model

---
 test/test_models.py                          | 1 -
 torchvision/models/quantization/inception.py | 2 ++
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/test_models.py b/test/test_models.py
index bec5b5c1ee9..debbb1b1583 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -317,7 +317,6 @@ def _check_input_backprop(model, inputs):
         "input_shape": (1, 3, 16, 224, 224),
     },
     "googlenet": {
-        "num_classes": 50,
         "init_weights": True,
     },
 }
diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py
index 5af73c80fa0..a9af67a9200 100644
--- a/torchvision/models/quantization/inception.py
+++ b/torchvision/models/quantization/inception.py
@@ -128,6 +128,7 @@ def __init__(
         num_classes: int = 1000,
         aux_logits: bool = True,
         transform_input: bool = False,
+        init_weights: Optional[bool] = None,
     ) -> None:
         super().__init__(
             num_classes=num_classes,
@@ -142,6 +143,7 @@ def __init__(
                 QuantizableInceptionE,
                 QuantizableInceptionAux,
             ],
+            init_weights=init_weights,
         )
         self.quant = torch.ao.quantization.QuantStub()
         self.dequant = torch.ao.quantization.DeQuantStub()

From 6f9685c5af444495f0cc75a6a962d1aeaaacbbf4 Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 13:38:01 +0200
Subject: [PATCH 06/10] Update test/test_models.py to make googlenet in 1 line

Co-authored-by: Philip Meier
---
 test/test_models.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/test/test_models.py b/test/test_models.py
index debbb1b1583..df9f2bb9355 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -316,9 +316,7 @@ def _check_input_backprop(model, inputs):
     "s3d": {
         "input_shape": (1, 3, 16, 224, 224),
     },
-    "googlenet": {
-        "init_weights": True,
-    },
+    "googlenet": {"init_weights": True},
 }
 # speeding up slow models:
 slow_models = [

From b6a2438ca4aeb39528f1972328fbc42ff4d0a0b5 Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 15:04:16 +0200
Subject: [PATCH 07/10] Refactor: Change inception quantisation class initialization to use args/kwargs

---
 torchvision/models/quantization/inception.py | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py
index a9af67a9200..9d5ec94a78c 100644
--- a/torchvision/models/quantization/inception.py
+++ b/torchvision/models/quantization/inception.py
@@ -123,17 +123,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
 
 
 class QuantizableInception3(inception_module.Inception3):
-    def __init__(
-        self,
-        num_classes: int = 1000,
-        aux_logits: bool = True,
-        transform_input: bool = False,
-        init_weights: Optional[bool] = None,
-    ) -> None:
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(
-            num_classes=num_classes,
-            aux_logits=aux_logits,
-            transform_input=transform_input,
             inception_blocks=[
                 QuantizableBasicConv2d,
                 QuantizableInceptionA,
@@ -143,7 +134,8 @@ def __init__(
                 QuantizableInceptionE,
                 QuantizableInceptionAux,
             ],
-            init_weights=init_weights,
+            *args,
+            **kwargs,
         )
         self.quant = torch.ao.quantization.QuantStub()
         self.dequant = torch.ao.quantization.DeQuantStub()
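
Note: the refactor above replaces a constructor that mirrored the parent's
signature with plain argument forwarding, so parameters added to Inception3
later (as init_weights was in PATCH 05) no longer need to be replicated by
hand. A self-contained sketch of the two styles (Parent, ChildMirrored and
ChildForwarding are hypothetical names, not torchvision classes):

    from typing import Any, Optional

    class Parent:
        def __init__(self, num_classes: int = 1000, init_weights: Optional[bool] = None) -> None:
            self.num_classes = num_classes
            self.init_weights = init_weights

    class ChildMirrored(Parent):
        # Every new Parent parameter must be copied into this signature by hand.
        def __init__(self, num_classes: int = 1000, init_weights: Optional[bool] = None) -> None:
            super().__init__(num_classes=num_classes, init_weights=init_weights)

    class ChildForwarding(Parent):
        # New Parent parameters are picked up automatically.
        def __init__(self, *args: Any, **kwargs: Any) -> None:
            super().__init__(*args, **kwargs)
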
From 79647030d6568702fb3dc79ba5057c3d77a57deb Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Fri, 16 Sep 2022 15:17:42 +0200
Subject: [PATCH 08/10] Resolve mypy issue

---
 torchvision/models/quantization/inception.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py
index 9d5ec94a78c..cb92709fc7a 100644
--- a/torchvision/models/quantization/inception.py
+++ b/torchvision/models/quantization/inception.py
@@ -124,7 +124,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
 
 class QuantizableInception3(inception_module.Inception3):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(
+        super().__init__(  # type: ignore[misc]
             inception_blocks=[
                 QuantizableBasicConv2d,
                 QuantizableInceptionA,

From e806a782776c4ef3f478606c85043ed2ae7f2e18 Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Mon, 19 Sep 2022 08:46:18 +0200
Subject: [PATCH 09/10] Move *args before inception_blocks

---
 torchvision/models/quantization/inception.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py
index cb92709fc7a..ebbd39a0cec 100644
--- a/torchvision/models/quantization/inception.py
+++ b/torchvision/models/quantization/inception.py
@@ -125,6 +125,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
 class QuantizableInception3(inception_module.Inception3):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(  # type: ignore[misc]
+            *args,
             inception_blocks=[
                 QuantizableBasicConv2d,
                 QuantizableInceptionA,
@@ -134,7 +135,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
                 QuantizableInceptionE,
                 QuantizableInceptionAux,
             ],
-            *args,
             **kwargs,
         )
         self.quant = torch.ao.quantization.QuantStub()
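
Note: calls of the form f(keyword=..., *args) are legal Python, but the starred
positionals are still bound to the parameter list from the left, so enough of
them collide with the keyword (PATCH 08 above suppresses a related mypy
complaint with type: ignore[misc]). A runnable sketch; ctor is a hypothetical
stand-in with Inception3's leading parameter order:

    def ctor(num_classes=1000, aux_logits=True, transform_input=False, inception_blocks=None):
        return num_classes, inception_blocks

    # Old call shape: keyword first, starred positionals after it.
    try:
        ctor(inception_blocks=[], *(10, False, False, []))
    except TypeError as err:
        print(err)  # ctor() got multiple values for argument 'inception_blocks'

    # New call shape: forwarded positionals first, pinned keyword last,
    # mirroring the order of the parameter list itself.
    print(ctor(*(10, False, False), inception_blocks=[]))  # (10, [])
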
From d67e16637bfc979940ba3ecc0d5f83df7d7819a9 Mon Sep 17 00:00:00 2001
From: Ambuj Pawar
Date: Mon, 19 Sep 2022 12:49:26 +0200
Subject: [PATCH 10/10] Move args keywords before other arguments

---
 torchvision/models/quantization/googlenet.py   |  6 +++---
 torchvision/models/quantization/inception.py   | 12 ++++++------
 torchvision/models/quantization/mobilenetv3.py |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/torchvision/models/quantization/googlenet.py b/torchvision/models/quantization/googlenet.py
index a75beb131b7..abf2184acec 100644
--- a/torchvision/models/quantization/googlenet.py
+++ b/torchvision/models/quantization/googlenet.py
@@ -39,7 +39,7 @@ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
 
 class QuantizableInception(Inception):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.cat = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -50,7 +50,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionAux(InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.relu = nn.ReLU()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -75,7 +75,7 @@ class QuantizableGoogLeNet(GoogLeNet):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(  # type: ignore[misc]
-            blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], *args, **kwargs
+            *args, blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], **kwargs
         )
         self.quant = torch.ao.quantization.QuantStub()
         self.dequant = torch.ao.quantization.DeQuantStub()
diff --git a/torchvision/models/quantization/inception.py b/torchvision/models/quantization/inception.py
index ebbd39a0cec..34cd2a0a36a 100644
--- a/torchvision/models/quantization/inception.py
+++ b/torchvision/models/quantization/inception.py
@@ -41,7 +41,7 @@ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
 class QuantizableInceptionA(inception_module.InceptionA):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -52,7 +52,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionB(inception_module.InceptionB):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -63,7 +63,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionC(inception_module.InceptionC):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -74,7 +74,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionD(inception_module.InceptionD):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
@@ -85,7 +85,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionE(inception_module.InceptionE):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
         self.myop1 = nn.quantized.FloatFunctional()
         self.myop2 = nn.quantized.FloatFunctional()
         self.myop3 = nn.quantized.FloatFunctional()
@@ -119,7 +119,7 @@ def forward(self, x: Tensor) -> Tensor:
 class QuantizableInceptionAux(inception_module.InceptionAux):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
 
 
 class QuantizableInception3(inception_module.Inception3):
diff --git a/torchvision/models/quantization/mobilenetv3.py b/torchvision/models/quantization/mobilenetv3.py
index 986f67c6080..53229c09534 100644
--- a/torchvision/models/quantization/mobilenetv3.py
+++ b/torchvision/models/quantization/mobilenetv3.py
@@ -83,7 +83,7 @@ def _load_from_state_dict(
 class QuantizableInvertedResidual(InvertedResidual):
     # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(se_layer=QuantizableSqueezeExcitation, *args, **kwargs)  # type: ignore[misc]
+        super().__init__(*args, se_layer=QuantizableSqueezeExcitation, **kwargs)  # type: ignore[misc]
         self.skip_add = nn.quantized.FloatFunctional()
 
     def forward(self, x: Tensor) -> Tensor:
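
Note: with the series applied, an init_weights value given in the test
configuration reaches the base model through the quantizable wrappers. A
minimal usage sketch (num_classes and the input shape are arbitrary):

    import torch
    from torchvision.models.quantization import googlenet

    # The quantizable builder forwards unknown kwargs through
    # QuantizableGoogLeNet down to GoogLeNet.
    model = googlenet(num_classes=10, init_weights=True)
    model.eval()
    out = model(torch.rand(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 10])
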