Merge pull request #3 from kzaleskaa/feat/add-quantization

Feat/add initial quantization

kzaleskaa authored Jun 6, 2024
2 parents 3abf2ed + 46dcad9 commit 41624e5
Showing 21 changed files with 1,421 additions and 42 deletions.
27 changes: 27 additions & 0 deletions README.md
@@ -74,3 +74,30 @@ You can override any parameter from the command line like this
```bash
python src/train.py trainer.max_epochs=20 data.batch_size=64
```

## Results for BiFPN + FFNet

The base model was trained for 25 epochs; QAT was then performed for 10 epochs. ("Fuse" in the first table presumably refers to the conv/batch-norm folding toggled by the `fuse_batch` flag introduced in this commit.)

**Baseline and Fuse**

<div align=center>

| Method | test/ssim (Per tensor) | model size (MB) (Per tensor) |
| ------------ | ---------------------- | ---------------------------- |
| **baseline** | 0.778 | 3.53 |
| **fuse** | 0.778 | 3.45 |

</div>

**PTQ, QAT, and PTQ + QAT (Per tensor and Per channel)**

<div align=center>

| Method | test/ssim (Per tensor) | model size (MB) (Per tensor) | test/ssim (Per channel) | model size (MB) (Per channel) |
| ------------- | ---------------------- | ---------------------------- | ----------------------- | ----------------------------- |
| **ptq**       | 0.6480                 | 0.9679                       | 0.6518                  | 0.9679                        |
| **qat**       | 0.7715                 | 0.9679                       | 0.7627                  | 0.9681                        |
| **ptq + qat** | 0.7724                 | 0.9690                       | 0.7626                  | 0.9692                        |

</div>
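These runs are driven by the experiment configs added in this commit. A plausible way to launch them, assuming the quantization pipeline is exposed as a `src/quantization.py` entry point (the Python side is not part of this diff, so the script name and checkpoint path are assumptions):

```bash
# Hypothetical invocations; only the configs below are shown in this diff.
# ckpt_path is mandatory for quantization (ckpt_path: ??? in the config).
python src/train.py experiment=example_train_baseline               # baseline
python src/quantization.py experiment=ptq_run ckpt_path=logs/baseline.ckpt
python src/quantization.py experiment=qat_run ckpt_path=logs/baseline.ckpt
```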
4 changes: 2 additions & 2 deletions configs/data/depth.yaml
```diff
@@ -1,5 +1,5 @@
 _target_: src.data.depth_datamodule.DepthDataModule
 data_dir: ${paths.data_dir}
-batch_size: 32 # Needs to be divisible by the number of devices (e.g., if in a distributed setup)
-num_workers: 4
+batch_size: 128 # Needs to be divisible by the number of devices (e.g., if in a distributed setup)
+num_workers: 8
 pin_memory: False
```
30 changes: 30 additions & 0 deletions configs/experiment/example_train_baseline.yaml
@@ -0,0 +1,30 @@
```yaml
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
  - override /data: depth
  - override /model: depth
  - override /callbacks: default
  - override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["depth", "simple_depth_net"]

seed: 12345

trainer:
  min_epochs: 10
  max_epochs: 25
  gradient_clip_val: 0.5

model:
  optimizer:
    lr: 0.002
  compile: false

data:
  batch_size: 64
```
34 changes: 34 additions & 0 deletions configs/experiment/fuse_batch_run.yaml
@@ -0,0 +1,34 @@
```yaml
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
  - override /data: depth
  - override /model: depth
  - override /callbacks: default
  - override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["depth", "simple_depth_net"]

seed: 12345

fuse_batch: true

trainer:
  min_epochs: 10
  max_epochs: 25
  gradient_clip_val: 0.5

model:
  optimizer:
    lr: 0.002
  compile: false

data:
  batch_size: 64

save_path: fuse_batch.pty
```
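The `fuse_batch` flag presumably toggles conv/batch-norm folding before quantization. In eager-mode PyTorch that step typically looks like the sketch below (an illustration under that assumption; the toy model is a placeholder, not the repository's code):

```python
import torch
from torch.ao.quantization import fuse_modules

# Minimal conv + batch-norm + ReLU folding sketch -- the mechanism the
# fuse_batch flag presumably enables; the model here is a placeholder.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3),
    torch.nn.BatchNorm2d(8),
    torch.nn.ReLU(),
)
model.eval()  # eager-mode fusion requires eval mode
fused = fuse_modules(model, [["0", "1", "2"]])  # fold bn (and relu) into conv
```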
35 changes: 35 additions & 0 deletions configs/experiment/ptq_qat_run.yaml
@@ -0,0 +1,35 @@
```yaml
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
  - override /data: depth
  - override /model: depth
  - override /callbacks: default
  - override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["depth", "simple_depth_net"]

seed: 12345

ptq: true
qat: true

trainer:
  min_epochs: 10
  max_epochs: 10
  gradient_clip_val: 0.5

model:
  optimizer:
    lr: 0.002
  compile: false

data:
  batch_size: 64

save_path: ptq_qat_channel.pty
```
34 changes: 34 additions & 0 deletions configs/experiment/ptq_run.yaml
@@ -0,0 +1,34 @@
```yaml
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
  - override /data: depth
  - override /model: depth
  - override /callbacks: default
  - override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["depth", "simple_depth_net"]

seed: 12345

ptq: true

trainer:
  min_epochs: 10
  max_epochs: 25
  gradient_clip_val: 0.5

model:
  optimizer:
    lr: 0.002
  compile: false

data:
  batch_size: 64

save_path: ptq_tensor.pty
```
34 changes: 34 additions & 0 deletions configs/experiment/qat_run.yaml
@@ -0,0 +1,34 @@
```yaml
# @package _global_

# to execute this experiment run:
# python train.py experiment=example

defaults:
  - override /data: depth
  - override /model: depth
  - override /callbacks: default
  - override /trainer: default

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

tags: ["depth", "simple_depth_net"]

seed: 12345

qat: true

trainer:
  min_epochs: 10
  max_epochs: 10
  gradient_clip_val: 0.5

model:
  optimizer:
    lr: 0.002
  compile: false

data:
  batch_size: 64

save_path: qat_channel.pty
```
8 changes: 5 additions & 3 deletions configs/model/depth.yaml
```diff
@@ -3,18 +3,20 @@ _target_: src.models.unet_module.UNETLitModule
 optimizer:
   _target_: torch.optim.Adam
   _partial_: true
-  lr: 0.001
+  lr: 1e-3
   weight_decay: 0.0

 scheduler:
   _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
   _partial_: true
   mode: min
   factor: 0.1
-  patience: 20
+  threshold: 0.0001
+  patience: 5
+  threshold_mode: "abs"

 net:
-  _target_: src.models.components.depth_net.DepthNet
+  _target_: src.models.components.depth_net_efficient_ffn.DepthNet

 # compile model for faster training with pytorch 2.0
 compile: false
```
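Both the optimizer and the scheduler are declared with `_partial_: true`, so Hydra instantiates them as callables that are completed with the model's parameters later, as the Lightning module's `configure_optimizers` typically does in the Lightning-Hydra-Template this repository appears to follow. A minimal, runnable sketch of that mechanic (placeholder model, not the repository's code):

```python
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

# `_partial_: true` makes Hydra return a functools.partial instead of an
# instance; it is completed with the model's parameters afterwards.
cfg = OmegaConf.create(
    {
        "_target_": "torch.optim.Adam",
        "_partial_": True,
        "lr": 1e-3,
        "weight_decay": 0.0,
    }
)
model = torch.nn.Linear(4, 1)  # placeholder model
optimizer_factory = instantiate(cfg)            # functools.partial(Adam, lr=...)
optimizer = optimizer_factory(params=model.parameters())
```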
33 changes: 33 additions & 0 deletions configs/quantization.yaml
@@ -0,0 +1,33 @@
```yaml
# @package _global_

defaults:
  - _self_
  - data: depth
  - model: depth
  - callbacks: default
  - logger: null # set logger here or use command line (e.g. `python train.py logger=tensorboard`)
  - trainer: default
  - paths: default
  - extras: default
  - hydra: default

fuse_batch: false
ptq: false
qat: false

save_path: name.pty

quantizer:
  config:
    asymmetric: true
    backend: "qnnpack"
    disable_requantization_for_cat: true
    per_tensor: false
  work_dir: "quant_output"

task_name: "quantization"

tags: ["dev"]

# passing checkpoint path is necessary for quantization
ckpt_path: ???
```
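The `quantizer.config` keys (`asymmetric`, `backend`, `disable_requantization_for_cat`, `per_tensor`) match the config dictionary of TinyNeuralNetwork's quantizers. If that is the library behind this commit (an assumption; the Python side is not part of this diff), the config would be consumed roughly like this:

```python
import torch
from tinynn.graph.quantization.quantizer import QATQuantizer

# Hypothetical sketch: library choice, model, and input shape are assumptions;
# only the YAML config is shown in this diff.
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())  # placeholder
dummy_input = torch.rand(1, 3, 224, 224)  # input shape assumed

quantizer = QATQuantizer(
    model,
    dummy_input,
    work_dir="quant_output",
    config={
        "asymmetric": True,
        "backend": "qnnpack",
        "disable_requantization_for_cat": True,
        "per_tensor": False,  # False -> per-channel weight quantization
    },
)
qat_model = quantizer.quantize()  # rewritten model with fake-quant observers
```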
7 changes: 0 additions & 7 deletions notebooks/example_model_results.ipynb
```diff
@@ -114,13 +114,6 @@
     "for i in range(5):\n",
     "    visualize_result(test_dataset[i][0], test_dataset[i][1], outputs[i])"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
```