Add int4 weight-only QAT flow targeting tinygemm kernel
Summary: This commit adds an int4 weight-only QAT flow targeting
the efficient tinygemm kernel. During fine-tuning we only simulate
the numerics of the kernel in bf16; the actual kernel is only
called after the model is quantized. For more detail, see
pytorch/ao#383.
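
For context, the two-step QAT flow this commit targets might look like the
sketch below. It assumes torchao's prepare/convert quantizer API; the toy
model and hyperparameters are illustrative, and the converted model needs a
CUDA device with bf16 weights for tinygemm to actually run.

    # Sketch of the int4 weight-only QAT flow (illustrative, not part of this commit).
    import torch
    import torch.nn as nn
    from torchao.quantization.prototype.qat import Int4WeightOnlyQATQuantizer

    # Toy stand-in for a real transformer; tinygemm expects bf16 linear weights.
    model = nn.Sequential(nn.Linear(512, 512, bias=False)).to(torch.bfloat16)

    qat_quantizer = Int4WeightOnlyQATQuantizer(groupsize=128)

    # 1. Prepare: swap in fake-quantized linears that simulate tinygemm numerics in bf16.
    model = qat_quantizer.prepare(model)

    # 2. Fine-tune as usual; the real int4 kernel is never called during training.

    # 3. Convert: quantize for real; inference now dispatches to the tinygemm kernel.
    model = qat_quantizer.convert(model)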

Test Plan: TODO
andrewor14 committed Sep 12, 2024
1 parent 7c51100 commit 782d154
Showing 2 changed files with 103 additions and 35 deletions.
8 changes: 6 additions & 2 deletions recipes/eleuther_eval.py
@@ -222,11 +222,15 @@ def _setup_model(
     ) -> nn.Module:
         with training.set_default_dtype(self._dtype), self._device:
             model = config.instantiate(model_cfg)
 
         if self._quantization_mode is not None:
             model = self._quantizer.quantize(model)
             model = model.to(device=self._device, dtype=self._dtype)
-        model.load_state_dict(model_state_dict)
+            for k, v in model_state_dict.items():
+                model_state_dict[k] = v.to(self._device)
+            model.load_state_dict(model_state_dict, assign=True)
+        else:
+            model.load_state_dict(model_state_dict)
 
         # Put model in eval mode.
         # Note: This will not disable the dropout applied in SDPA,
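
A note on the assign=True change above: the quantized checkpoint's tensors are
loaded by assignment rather than copied into the freshly quantized module's
parameters, which is why each tensor is moved to the target device first. A
minimal sketch of the copy-vs-assign distinction in plain PyTorch (toy module
and values, for illustration only):

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 4, bias=False)
    state_dict = {"weight": torch.randn(4, 4)}

    # Default assign=False: values are copied in place into the existing
    # parameters, preserving their current tensor type, device, and layout.
    model.load_state_dict(state_dict)

    # assign=True: the state-dict tensors themselves replace the parameters.
    # The recipe relies on this so the checkpoint's (possibly tensor-subclass)
    # weights are used directly, after being moved to the right device first.
    model.load_state_dict(state_dict, assign=True)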
130 changes: 97 additions & 33 deletions torchtune/training/quantization.py
@@ -6,8 +6,34 @@
 
 from typing import Callable, Optional
 
+from torchao.dtypes import TensorCoreTiledLayoutType
+from torchao.quantization import int4_weight_only, int8_dynamic_activation_int4_weight, quantize_
+from torchao.quantization.prototype.qat import (
+    disable_4w_fake_quant,
+    disable_8da4w_fake_quant,
+    enable_4w_fake_quant,
+    enable_8da4w_fake_quant,
+    Int4WeightOnlyQATQuantizer,
+    Int8DynActInt4WeightQATQuantizer,
+)
+from torchao.quantization.prototype.qat._module_swap_api import (
+    disable_4w_fake_quant_module_swap,
+    disable_8da4w_fake_quant_module_swap,
+    enable_4w_fake_quant_module_swap,
+    enable_8da4w_fake_quant_module_swap,
+    Int4WeightOnlyQATQuantizerModuleSwap,
+    Int8DynActInt4WeightQATQuantizerModuleSwap,
+)
+
+
+__all__ = [
+    "get_quantizer_mode",
+    "Int4WeightOnlyQuantizer",
+    "Int4WeightOnlyQATQuantizer",
+    "Int4WeightOnlyQATQuantizerModuleSwap",
+    "Int8DynActInt4WeightQuantizer",
+    "Int8DynActInt4WeightQATQuantizer",
+    "Int8DynActInt4WeightQATQuantizerModuleSwap",
+]


@@ -16,47 +42,85 @@
 _quantizer_mode_to_enable_fake_quant = {}
 
 
-from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer
-
-__all__.append("Int8DynActInt4WeightQuantizer")
-_quantizer_to_mode[Int8DynActInt4WeightQuantizer] = "8da4w"
-
-from torchao.quantization.prototype.qat import (
-    disable_8da4w_fake_quant,
-    enable_8da4w_fake_quant,
-    Int8DynActInt4WeightQATQuantizer,
-)
-
-__all__.append("Int8DynActInt4WeightQATQuantizer")
-_quantizer_to_mode[Int8DynActInt4WeightQATQuantizer] = "8da4w-qat"
-_quantizer_mode_to_disable_fake_quant["8da4w-qat"] = disable_8da4w_fake_quant
-_quantizer_mode_to_enable_fake_quant["8da4w-qat"] = enable_8da4w_fake_quant
-
-try:
-    # Note: QAT tensor subclass implementation in torchao only works
-    # with FSDP2 today. For other distribution strategies like DDP and
-    # FSDP1, users will need to fall back to the old module swap flow.
-    # TODO: remove this try catch once we upgrade to torchao 0.5.0
-
-    from torchao.quantization.prototype.qat._module_swap_api import (
-        disable_8da4w_fake_quant_module_swap,
-        enable_8da4w_fake_quant_module_swap,
-        Int8DynActInt4WeightQATQuantizerModuleSwap,
-    )
-
-    __all__.append("Int8DynActInt4WeightQATQuantizerModuleSwap")
-    _quantizer_to_mode[
-        Int8DynActInt4WeightQATQuantizerModuleSwap
-    ] = "8da4w-qat-module-swap"
-    _quantizer_mode_to_disable_fake_quant[
-        "8da4w-qat-module-swap"
-    ] = disable_8da4w_fake_quant_module_swap
-    _quantizer_mode_to_enable_fake_quant[
-        "8da4w-qat-module-swap"
-    ] = enable_8da4w_fake_quant_module_swap
-except ImportError:
-    pass
+# ========================================================
+# int8 dynamic activations + int4 weight tensor subclass |
+# ========================================================
+
+
+class Int8DynActInt4WeightQuantizer:
+    """
+    Quantizer for applying int8 per token dynamic activation + int4
+    per group weight quantization to linear layers in the model.
+    """
+
+    def __init__(self, groupsize: int = 256):
+        self.groupsize = groupsize
+
+    def quantize(self, model):
+        quantize_fn = int8_dynamic_activation_int4_weight(self.groupsize)
+        quantize_(model, quantize_fn)
+        return model
+
+
+_quantizer_to_mode[Int8DynActInt4WeightQuantizer] = "8da4w"
+_quantizer_to_mode[Int8DynActInt4WeightQATQuantizer] = "8da4w-qat"
+_quantizer_mode_to_disable_fake_quant["8da4w-qat"] = disable_8da4w_fake_quant
+_quantizer_mode_to_enable_fake_quant["8da4w-qat"] = enable_8da4w_fake_quant
+
+
+# ==================
+# int4 weight only |
+# ==================
+
+
+class Int4WeightOnlyQuantizer:
+    """
+    Quantizer for applying int4 per group weight only quantization
+    to linear layers in the model using the efficient tinygemm kernel.
+    """
+
+    def __init__(self, groupsize: int = 128, inner_k_tiles: int = 8):
+        self.groupsize = groupsize
+        self.inner_k_tiles = inner_k_tiles
+
+    def quantize(self, model):
+        layout_type = TensorCoreTiledLayoutType(self.inner_k_tiles)
+        quantize_fn = int4_weight_only(self.groupsize, layout_type)
+        quantize_(model, quantize_fn)
+        return model
+
+
+_quantizer_to_mode[Int4WeightOnlyQuantizer] = "4w"
+_quantizer_to_mode[Int4WeightOnlyQATQuantizer] = "4w-qat"
+_quantizer_mode_to_disable_fake_quant["4w-qat"] = disable_4w_fake_quant
+_quantizer_mode_to_enable_fake_quant["4w-qat"] = enable_4w_fake_quant
+
+
+# =============
+# module swap |
+# =============
+
+# Note: QAT tensor subclass implementation in torchao only works
+# with FSDP2 today. For other distribution strategies like DDP and
+# FSDP1, users will need to fall back to the old module swap flow.
+
+# int4 weight-only
+_quantizer_to_mode[Int4WeightOnlyQATQuantizerModuleSwap] = "4w-qat-module-swap"
+_quantizer_mode_to_disable_fake_quant[
+    "4w-qat-module-swap"
+] = disable_4w_fake_quant_module_swap
+_quantizer_mode_to_enable_fake_quant[
+    "4w-qat-module-swap"
+] = enable_4w_fake_quant_module_swap
+
+# int8 dynamic activations + int4 weight
+_quantizer_to_mode[Int8DynActInt4WeightQATQuantizerModuleSwap] = "8da4w-qat-module-swap"
+_quantizer_mode_to_disable_fake_quant[
+    "8da4w-qat-module-swap"
+] = disable_8da4w_fake_quant_module_swap
+_quantizer_mode_to_enable_fake_quant[
+    "8da4w-qat-module-swap"
+] = enable_8da4w_fake_quant_module_swap
 
 
 def get_quantizer_mode(quantizer: Optional[Callable]) -> Optional[str]:
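
A hedged usage sketch of the resulting post-training quantizer and mode lookup
(the toy model, import path, and sizes are illustrative, and the tinygemm path
assumes CUDA bf16 weights in practice):

    import torch
    import torch.nn as nn
    from torchtune.training.quantization import (
        Int4WeightOnlyQuantizer,
        get_quantizer_mode,
    )

    # Toy stand-in; dimensions chosen to satisfy groupsize/inner_k_tiles tiling.
    model = nn.Sequential(nn.Linear(4096, 4096, bias=False)).to(torch.bfloat16)

    quantizer = Int4WeightOnlyQuantizer(groupsize=128, inner_k_tiles=8)
    print(get_quantizer_mode(quantizer))  # "4w"

    # Replaces each linear weight with an int4 tensor-core-tiled subclass that
    # the tinygemm kernel consumes at inference time.
    model = quantizer.quantize(model)

For DDP or FSDP1 runs, the *ModuleSwap quantizers registered above
("4w-qat-module-swap", "8da4w-qat-module-swap") are the fallback, per the note
in the diff.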
