Revised QuantSim imports and updated the front README page
Signed-off-by: Bharath Ramaswamy <quic_bharathr@quicinc.com>
quic-bharathr committed Jan 25, 2021
1 parent 2cacf5c commit e0b47fb
Showing 7 changed files with 35 additions and 74 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -233,7 +233,7 @@ An original FP32 source model is quantized either using post-training quantizati
<td>EfficientNet-lite0</td>
<td><a href="https://github.com/rwightman/gen-efficientnet-pytorch">GitHub Repo</a></td>
<td><a href="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth">Pretrained Model</a></td>
<td><a href="zoo_torch/examples/eval_efficientnetlite0.py">See Example</a></td>
<td><a href="/../../releases/download/pt-effnet-checkpoint/adaround_efficient_lite.pth">Quantized Model</a></td>
<td>(ImageNet) Top-1 Accuracy <br> FP32: 75.42%<br> INT8: 74.44%</td>
<td><a href="zoo_torch/Docs/EfficientNet-lite0.md">EfficientNet-lite0.md</a></td>
</tr>
36 changes: 0 additions & 36 deletions zoo_torch/Docs/MobileNetV2-SSD-lite.md
@@ -16,43 +16,7 @@ git apply ../aimet-model-zoo/zoo_torch/examples/torch_ssd_eval.patch
mv vision ../aimet-model-zoo/zoo_torch/examples/
mv eval_ssd.py ../aimet-model-zoo/zoo_torch/examples/
```
- 3. Change __init__ function from line #27 in vision/ssd/ssd.py as follows:
- ```
- self.config = None #############Change 1
- self.image_size = 300
- self.image_mean = np.array([127, 127, 127]) # RGB layout
- self.image_std = 128.0
- self.iou_threshold = 0.45
- self.center_variance = 0.1
- self.size_variance = 0.2
- self.specs = [box_utils.SSDSpec(19, 16, box_utils.SSDBoxSizes(60, 105), [2, 3]),
-               box_utils.SSDSpec(10, 32, box_utils.SSDBoxSizes(105, 150), [2, 3]),
-               box_utils.SSDSpec(5, 64, box_utils.SSDBoxSizes(150, 195), [2, 3]),
-               box_utils.SSDSpec(3, 100, box_utils.SSDBoxSizes(195, 240), [2, 3]),
-               box_utils.SSDSpec(2, 150, box_utils.SSDBoxSizes(240, 285), [2, 3]),
-               box_utils.SSDSpec(1, 300, box_utils.SSDBoxSizes(285, 330), [2, 3])]
- self.gen_priors = box_utils.generate_ssd_priors(self.specs, self.image_size)
-
- # register layers in source_layer_indexes by adding them to a module list
- self.source_layer_add_ons = nn.ModuleList([t[1] for t in source_layer_indexes
-                                            if isinstance(t, tuple) and not isinstance(t, GraphPath)])
- if device:
-     self.device = device
- else:
-     self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- if is_test:
-     self.priors = self.gen_priors.to(self.device)
- ```
- 4. Change line #93 in vision/ssd/ssd.py as follows:
- ```
- boxes = box_utils.convert_locations_to_boxes(
-     locations.cpu(), self.priors.cpu(), self.center_variance, self.size_variance
- )
- ```
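For orientation, the two removed steps above hardcoded the SSD prior-box configuration and forced box decoding onto the CPU. A minimal sketch of what they wired together (the module path and the 300x300 input size are assumptions based on the pytorch-ssd repo; `locations` stands in for raw network output):
```
import torch
from vision.utils import box_utils  # assumed path in the pytorch-ssd repo

# One spec per feature map: (feature_map_size, shrinkage, box_sizes, aspect_ratios).
specs = [box_utils.SSDSpec(19, 16, box_utils.SSDBoxSizes(60, 105), [2, 3])]
priors = box_utils.generate_ssd_priors(specs, image_size=300)  # -> (num_priors, 4)

# Mirrors the step-4 change: tensors are moved to the CPU before decoding.
locations = torch.zeros(1, priors.shape[0], 4)  # stand-in for network output
boxes = box_utils.convert_locations_to_boxes(
    locations.cpu(), priors.cpu(), 0.1, 0.2)  # center/size variances from step 3
```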

## Obtaining model checkpoint and dataset
- The original MobileNetV2-SSD-lite checkpoint can be downloaded here:
9 changes: 4 additions & 5 deletions zoo_torch/Docs/MobilenetV2.md
@@ -25,10 +25,6 @@ self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last
- Change line #91 as follows in MobileNetV2.py
```
output_channel = int(c * width_mult)
```
- - Append line #100 as follows in MobileNetV2.py
- ```
- self.features.append(nn.AvgPool2d(input_size // 32)
- ```
- Change line #104 as follows in MobileNetV2.py
```
@@ -40,6 +36,10 @@ self.classifier = nn.Sequential(
- Change line #110 as follows in MobileNetV2.py
```
x = x.squeeze()
```
+ - Append line #100 as follows in MobileNetV2.py
+ ```
+ self.features.append(nn.AvgPool2d(input_size // 32))
+ ```
## Obtaining model checkpoint and dataset

@@ -56,7 +56,6 @@ python eval_mobilenetv2.py \
--model-path <path to optimized mobilenetv2 checkpoint> \
--images-dir <path to imagenet root directory> \
--quant-scheme <quantization scheme to run> \
- --input-shape <input shape to model> \
--default-output-bw <bitwidth for activation quantization> \
--default-param-bw <bitwidth for weight quantization>
```
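Note that `--input-shape` is gone from this command: the evaluation script now fixes the shape in code (see the eval_mobilenetv2.py diff below). A one-line sketch of the assumption it bakes in:
```
# Hardcoded in eval_mobilenetv2.py after this change; 224x224 is the standard
# ImageNet evaluation resolution for MobileNetV2.
input_shape = (1, 3, 224, 224)  # (batch, channels, height, width)
image_size = input_shape[-1]    # 224, used to size the eval transforms
```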
22 changes: 10 additions & 12 deletions zoo_torch/examples/eval_deeplabv3.py
@@ -47,28 +47,26 @@ def func_wrapper(model, arguments):
return func_wrapper




def arguments():
parser = argparse.ArgumentParser(description='Evaluation script for PyTorch ImageNet networks.')

parser.add_argument('--checkpoint-path', help='Path to optimized checkpoint directory to load from.', default = None, type=str)
- parser.add_argument('--base-size', help='Base size for Random Crop', default=513)
- parser.add_argument('--crop-size', help='Crop size for Random Crop', default=513)
- parser.add_argument('--num-classes', help='Number of classes in a dataset', default=21)
- parser.add_argument('--dataset', help='dataset used for evaluation', default='pascal')
+ parser.add_argument('--base-size', help='Base size for Random Crop', type = int, default=513)
+ parser.add_argument('--crop-size', help='Crop size for Random Crop', type = int, default=513)
+ parser.add_argument('--num-classes', help='Number of classes in a dataset', type = int, default=21)
+ parser.add_argument('--dataset', help='dataset used for evaluation', default='pascal', type = str)

parser.add_argument('--seed', help='Seed number for reproducibility', default=0)
parser.add_argument('--use-sbd', help='Use SBD data for data augmentation during training', default=False)

parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf', choices = ['tf', 'tf_enhanced', 'range_learning_tf', 'range_learning_tf_enhanced'])
parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest')
- parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', default=8)
- parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', default=8)
+ parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', type = int, default=8)
+ parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', type = int, default=8)
parser.add_argument('--config-file', help='Quantsim configuration file.', default=None, type=str)
parser.add_argument('--cuda', help='Enable cuda for a model', default=True)

- parser.add_argument('--batch-size', help='Data batch size for a model', default=16)
+ parser.add_argument('--batch-size', help='Data batch size for a model', type = int, default=16)
args = parser.parse_args()
return args

@@ -87,13 +85,13 @@ def main():
model = DeepLab(backbone='mobilenet', output_stride=16, num_classes=21,
sync_bn=False)
model.eval()

from aimet_torch import batch_norm_fold
from aimet_torch import utils
args.input_shape = (1,3,513,513)
batch_norm_fold.fold_all_batch_norms(model, args.input_shape)
utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6, torch.nn.ReLU)
- # from IPython import embed; embed()

if args.checkpoint_path:
model.load_state_dict(torch.load(args.checkpoint_path))
else:
@@ -105,7 +103,7 @@
eval_func = model_eval(args, val_loader)

from aimet_common.defs import QuantScheme
- from aimet_torch.pro.quantsim import QuantizationSimModel
+ from aimet_torch.quantsim import QuantizationSimModel
if hasattr(args, 'quant_scheme'):
if args.quant_scheme == 'range_learning_tf':
quant_scheme = QuantScheme.training_range_learning_with_tf_init
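The hunk above is cut off after the first `quant_scheme` branch. For context, a hedged sketch of the full string-to-enum mapping implied by the four `--quant-scheme` choices (only the `range_learning_tf` branch is visible above; the remaining branches are an assumption based on AIMET's QuantScheme enum names, and `args` is the namespace parsed in arguments()):
```
from aimet_common.defs import QuantScheme

# Assumed mapping from the four --quant-scheme CLI choices to AIMET enums.
QUANT_SCHEME_MAP = {
    'tf': QuantScheme.post_training_tf,
    'tf_enhanced': QuantScheme.post_training_tf_enhanced,
    'range_learning_tf': QuantScheme.training_range_learning_with_tf_init,
    'range_learning_tf_enhanced': QuantScheme.training_range_learning_with_tf_enhanced_init,
}
quant_scheme = QUANT_SCHEME_MAP[args.quant_scheme]
```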
20 changes: 10 additions & 10 deletions zoo_torch/examples/eval_efficientnetlite0.py
@@ -23,7 +23,7 @@
from aimet_torch import cross_layer_equalization
from aimet_torch import batch_norm_fold
from aimet_common.defs import QuantScheme
- from aimet_torch.pro.quantsim import QuantizationSimModel
+ from aimet_torch.quantsim import QuantizationSimModel
from aimet_torch.onnx_utils import onnx_pytorch_conn_graph_type_pairs
from aimet_common.utils import AimetLogger
import logging
@@ -111,19 +111,18 @@ def arguments():

parser.add_argument('--checkpoint', help='Path to optimized checkpoint', default=None, type=str)
parser.add_argument('--images-dir', help='Imagenet eval image', default='./ILSVRC2012_PyTorch/', type=str)
- parser.add_argument('--input-shape', help='Model to an input image shape, (ex : [batch, channel, width, height]', default=(1,3,224,224))
- parser.add_argument('--seed', help='Seed number for reproducibility', default=0)
+ parser.add_argument('--seed', help='Seed number for reproducibility', type = int, default=0)

parser.add_argument('--quant-tricks', help='Preprocessing prior to Quantization', default=[], choices=['BNfold', 'CLE'], nargs = "+")
parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf', choices = ['tf', 'tf_enhanced', 'range_learning_tf', 'range_learning_tf_enhanced'])
parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest')
- parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', default=8)
- parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', default=8)
+ parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', type = int, default=8)
+ parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', type = int, default=8)
parser.add_argument('--config-file', help='Quantsim configuration file.', default=None, type=str)
parser.add_argument('--cuda', help='Enable cuda for a model', default=True)

- parser.add_argument('--batch-size', help='Data batch size for a model', default=64)
- parser.add_argument('--num-workers', help='Number of workers to run data loader in parallel', default=16)
+ parser.add_argument('--batch-size', help='Data batch size for a model', type = int, default=64)
+ parser.add_argument('--num-workers', help='Number of workers to run data loader in parallel', type = int, default=16)

args = parser.parse_args()
return args
@@ -137,8 +136,9 @@ def main():
else:
model = load_model()
model.eval()

- image_size = args.input_shape[-1]
+ input_shape = (1,3,224,224)
+ args.input_shape = input_shape
+ image_size = input_shape[-1]

data_loader_kwargs = { 'worker_init_fn':work_init, 'num_workers' : args.num_workers}
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
@@ -179,7 +179,7 @@
'config_file': args.config_file
}
print(kwargs)
- sim = QuantizationSimModel(model.cpu(), input_shapes=args.input_shape, **kwargs)
+ sim = QuantizationSimModel(model.cpu(), input_shapes=input_shape, **kwargs)

# Manually configure supergroups; AIMET currently does not support [Conv-ReLU6] in a supergroup
from aimet_torch.qc_quantize_op import QcPostTrainingWrapper
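This script imports `batch_norm_fold` and `cross_layer_equalization` (top of the file) to honor the `--quant-tricks` choices. A hedged sketch of how those tricks are typically applied before building the simulator (`equalize_model` is an assumption from AIMET's public API; `fold_all_batch_norms` matches the usage visible in eval_deeplabv3.py above, and `model`/`args` come from the script):
```
from aimet_torch import batch_norm_fold
from aimet_torch import cross_layer_equalization

# Apply the optional pre-quantization tricks selected via --quant-tricks.
input_shape = (1, 3, 224, 224)
if 'BNfold' in args.quant_tricks:
    # Fold batch-norm statistics into the preceding conv/linear weights.
    batch_norm_fold.fold_all_batch_norms(model, input_shape)
if 'CLE' in args.quant_tricks:
    # Rescale weights across layers to even out per-channel ranges.
    cross_layer_equalization.equalize_model(model, input_shape)
```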
16 changes: 8 additions & 8 deletions zoo_torch/examples/eval_mobilenetv2.py
@@ -88,18 +88,17 @@ def arguments():

parser.add_argument('--model-path', help='Path to checkpoint directory to load from.', default = "./model/mv2qat_modeldef.pth", type=str)
parser.add_argument('--images-dir', help='Imagenet eval image', default='./ILSVRC2012/', type=str)
- parser.add_argument('--input-shape', help='Model to an input image shape, (ex : [batch, channel, width, height]', default=(1,3,224,224))
- parser.add_argument('--seed', help='Seed number for reproducibility', default=0)
+ parser.add_argument('--seed', help='Seed number for reproducibility', type = int, default=0)

parser.add_argument('--quant-tricks', help='Preprocessing prior to Quantization', choices=['BNfold', 'CLS', 'HBF', 'CLE', 'BC', 'adaround'], nargs = "+")
parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf', choices = ['tf', 'tf_enhanced', 'range_learning_tf', 'range_learning_tf_enhanced'])
parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest')
- parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', default=8)
- parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', default=8)
+ parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', type = int, default=8)
+ parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', type = int, default=8)
parser.add_argument('--config-file', help='Quantsim configuration file.', default=None, type=str)
parser.add_argument('--cuda', help='Enable cuda for a model', default=True)

- parser.add_argument('--batch-size', help='Data batch size for a model', default=64)
+ parser.add_argument('--batch-size', help='Data batch size for a model', type = int, default=64)


args = parser.parse_args()
@@ -123,12 +122,13 @@ def main():
raise ValueError('Model path {} must be specified'.format(args.model_path))

model.eval()
- image_size = args.input_shape[-1]
+ input_shape = (1,3,224,224)
+ image_size = input_shape[-1]
eval_func_quant = model_eval(args.images_dir + '/val/', image_size, batch_size=args.batch_size, num_workers=0, quant = True)
eval_func = model_eval(args.images_dir + '/val/', image_size, batch_size=args.batch_size, num_workers=16)

from aimet_common.defs import QuantScheme
- from aimet_torch.pro.quantsim import QuantizationSimModel
+ from aimet_torch.quantsim import QuantizationSimModel
if hasattr(args, 'quant_scheme'):
if args.quant_scheme == 'range_learning_tf':
quant_scheme = QuantScheme.training_range_learning_with_tf_init
@@ -147,7 +147,7 @@
'config_file': args.config_file
}
print(kwargs)
- sim = QuantizationSimModel(model.cpu(), input_shapes=args.input_shape, **kwargs)
+ sim = QuantizationSimModel(model.cpu(), input_shapes=input_shape, **kwargs)
sim.compute_encodings(eval_func_quant, (32, True))
post_quant_top1 = eval_func(sim.model.cuda(), (0, True))
print("Post Quant Top1 :", post_quant_top1)
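Pulling the changed lines together, the quantized-evaluation flow in this script now looks roughly like the sketch below (keyword names mirror the script's kwargs dict; `rounding_mode` is inferred from the `--round-mode` flag, and `model`, `quant_scheme`, `eval_func_quant`, and `eval_func` are defined in the script above):
```
from aimet_torch.quantsim import QuantizationSimModel

# Build the simulation model on the CPU with the hardcoded input shape,
# calibrate encodings on a short run, then evaluate the quantized model.
input_shape = (1, 3, 224, 224)
sim = QuantizationSimModel(model.cpu(), input_shapes=input_shape,
                           quant_scheme=quant_scheme,  # mapped from --quant-scheme
                           rounding_mode='nearest',    # --round-mode default
                           default_output_bw=8,        # --default-output-bw
                           default_param_bw=8,         # --default-param-bw
                           config_file=None)           # --config-file
sim.compute_encodings(eval_func_quant, (32, True))     # 32 batches for calibration
post_quant_top1 = eval_func(sim.model.cuda(), (0, True))
print("Post Quant Top1 :", post_quant_top1)
```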
4 changes: 2 additions & 2 deletions zoo_torch/examples/ssd_utils.py
@@ -59,7 +59,7 @@ def func_quant(model, iterations, use_cuda = True):

def get_simulations(model, args):
from aimet_common.defs import QuantScheme
- from aimet_torch.pro.quantsim import QuantizationSimModel
+ from aimet_torch.quantsim import QuantizationSimModel
if hasattr(args, 'quant_scheme'):
if args.quant_scheme == 'range_learning_tf':
quant_scheme = QuantScheme.training_range_learning_with_tf_init
@@ -79,4 +79,4 @@ def get_simulations(model, args):
}
print(kwargs)
sim = QuantizationSimModel(model.cpu(), input_shapes=args.input_shape, **kwargs)
- return sim
\ No newline at end of file
+ return sim
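A hypothetical call site for `get_simulations` (the `args` namespace comes from an eval script's parser; the 300x300 shape and the callable names are assumptions, with SSD's standard input resolution used for illustration):
```
# Hypothetical usage: attach the input shape the simulator needs, then
# build it and calibrate encodings with a forward-pass callable.
args.input_shape = (1, 3, 300, 300)
sim = get_simulations(model, args)
sim.compute_encodings(forward_pass_fn, forward_pass_args)
```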

