Add smoke test for fp8 kv cache
mgoin committed Jul 24, 2024
1 parent 2125900 commit 05a2398
Showing 1 changed file with 8 additions and 2 deletions.
tests/quantization/test_fp8.py: 8 additions & 2 deletions
@@ -60,12 +60,18 @@ def test_kv_cache_model_load_and_run(vllm_runner, model_id: str):
 
 @pytest.mark.skipif(not is_quant_method_supported("fp8"),
                     reason="FP8 is not supported on this GPU type.")
-def test_load_fp16_model(vllm_runner) -> None:
-    with vllm_runner("facebook/opt-125m", quantization="fp8") as llm:
+@pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"])
+def test_load_fp16_model(vllm_runner, kv_cache_dtype: str) -> None:
+    with vllm_runner("facebook/opt-125m", quantization="fp8", kv_cache_dtype=kv_cache_dtype) as llm:
 
         model = llm.model.llm_engine.model_executor.driver_worker.model_runner.model  # noqa: E501
         fc1 = model.model.decoder.layers[0].fc1
         assert isinstance(fc1.quant_method, Fp8LinearMethod)
+        if kv_cache_dtype == "fp8":
+            attn = model.model.decoder.layers[0].self_attn.attn
+            assert isinstance(attn.quant_method, Fp8KVCacheMethod)
+            assert attn._k_scale == 1.0
+            assert attn._v_scale == 1.0
 
         capability = torch.cuda.get_device_capability()
         capability = capability[0] * 10 + capability[1]
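As context for what the parametrized test exercises end to end, here is a minimal sketch of loading a model with fp8 weight quantization plus an fp8 KV cache through vLLM's public API. The prompt and sampling settings are illustrative only and not part of this commit; the new assertions reflect that an fp16 checkpoint with no calibrated KV scales falls back to default k/v scales of 1.0.

# Illustrative sketch, not part of this commit: run a model with fp8
# weights and an fp8 KV cache via vLLM's public API, mirroring the
# configuration the smoke test loads.
from vllm import LLM, SamplingParams

llm = LLM(
    model="facebook/opt-125m",  # same model the smoke test uses
    quantization="fp8",         # quantize linear layers to fp8
    kv_cache_dtype="fp8",       # store the KV cache in fp8 as well
)
outputs = llm.generate(["Hello, my name is"],
                       SamplingParams(max_tokens=16))
print(outputs[0].outputs[0].text)

The test itself can be run in isolation with pytest's -k filter, e.g. pytest tests/quantization/test_fp8.py -k test_load_fp16_model.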
