Commit 415c0b7: Add fixed target sizes

mgoin committed Jul 18, 2024
1 parent 529dfef
Showing 1 changed file with 12 additions and 12 deletions.
tests/test_auto_fp8.py (12 additions, 12 deletions)
@@ -8,12 +8,12 @@
 from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig
 
 MODELS = [
-    "facebook/opt-125m",
-    "Qwen/Qwen2-0.5B-Instruct",
+    ("facebook/opt-125m", 160),
+    ("Qwen/Qwen2-0.5B-Instruct", 600),
 ]
 
-@pytest.mark.parametrize("model_id", MODELS)
-def test_dynamic_quantization(model_id):
+@pytest.mark.parametrize("model_id,target_size", MODELS)
+def test_dynamic_quantization(model_id, target_size):
     quantized_model_dir = model_id.split("/")[-1] + "-fp8-dynamic"
 
     quantize_config = BaseQuantizeConfig(
@@ -30,13 +30,13 @@ def test_dynamic_quantization(model_id):
     model_size = os.path.getsize(f"{quantized_model_dir}/model.safetensors")
     shutil.rmtree(quantized_model_dir)
 
-    # We expect the model to be < 160MB
-    target_size = 160 * (1024 * 1024)
+    # We expect the model to be a certain size
+    target_size = target_size * (1024 * 1024)
     assert model_size < target_size
 
 
-@pytest.mark.parametrize("model_id", MODELS)
-def test_static_quantization(model_id):
+@pytest.mark.parametrize("model_id,target_size", MODELS)
+def test_static_quantization(model_id, target_size):
     quantized_model_dir = model_id.split("/")[-1] + "-fp8-static"
 
     tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
@@ -56,11 +56,11 @@ def test_static_quantization(model_id):
     shutil.rmtree(quantized_model_dir)
 
     # We expect the model to be < 160MB
-    target_size = 160 * (1024 * 1024)
+    target_size = target_size * (1024 * 1024)
     assert model_size < target_size
 
-@pytest.mark.parametrize("model_id", MODELS)
-def test_kv_cache_static_quantization(model_id):
+@pytest.mark.parametrize("model_id,target_size", MODELS)
+def test_kv_cache_static_quantization(model_id, target_size):
     quantized_model_dir = model_id.split("/")[-1] + "-fp8-static-kv"
 
     tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
@@ -94,5 +94,5 @@ def test_kv_cache_static_quantization(model_id):
     shutil.rmtree(quantized_model_dir)
 
     # We expect the model to be < 160MB
-    target_size = 160 * (1024 * 1024)
+    target_size = target_size * (1024 * 1024)
     assert model_size < target_size
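For context on the pattern this commit relies on: pytest.mark.parametrize with a comma-separated argument string unpacks each (model_id, target_size) tuple from MODELS into separate test arguments, so every model gets its own size budget. A minimal standalone sketch of the mechanism follows; the test name and body are illustrative only, not the repository's actual check:

import pytest

MODELS = [
    ("facebook/opt-125m", 160),
    ("Qwen/Qwen2-0.5B-Instruct", 600),
]

@pytest.mark.parametrize("model_id,target_size", MODELS)
def test_size_budget_example(model_id, target_size):
    # target_size is given in MiB; convert to bytes,
    # mirroring the suite's `target_size * (1024 * 1024)`.
    budget_bytes = target_size * 1024 * 1024
    # A real test would compare a measured safetensors file size
    # against budget_bytes; here we only demonstrate the tuple
    # unpacking and the unit conversion.
    assert budget_bytes > 0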
