From b5d6381dafe0c2e86a1968976749787892d78fdf Mon Sep 17 00:00:00 2001
From: mgoin
Date: Wed, 24 Jul 2024 22:05:02 +0000
Subject: [PATCH] Revert "Add smoke test for fp8 kv cache"

This reverts commit 05a23982513dd59bc7f379b54a37c4ea803c7ea7.
---
 tests/quantization/test_fp8.py | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/tests/quantization/test_fp8.py b/tests/quantization/test_fp8.py
index cadf71a3c3233..0602fedf0b8e3 100644
--- a/tests/quantization/test_fp8.py
+++ b/tests/quantization/test_fp8.py
@@ -60,18 +60,12 @@ def test_kv_cache_model_load_and_run(vllm_runner, model_id: str):
 
 @pytest.mark.skipif(not is_quant_method_supported("fp8"),
                     reason="FP8 is not supported on this GPU type.")
-@pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"])
-def test_load_fp16_model(vllm_runner, kv_cache_dtype: str) -> None:
-    with vllm_runner("facebook/opt-125m", quantization="fp8", kv_cache_dtype=kv_cache_dtype) as llm:
+def test_load_fp16_model(vllm_runner) -> None:
+    with vllm_runner("facebook/opt-125m", quantization="fp8") as llm:
 
         model = llm.model.llm_engine.model_executor.driver_worker.model_runner.model  # noqa: E501
         fc1 = model.model.decoder.layers[0].fc1
         assert isinstance(fc1.quant_method, Fp8LinearMethod)
-        if kv_cache_dtype == "fp8":
-            attn = model.model.decoder.layers[0].self_attn.attn
-            assert isinstance(attn.quant_method, Fp8KVCacheMethod)
-            assert attn._k_scale == 1.0
-            assert attn._v_scale == 1.0
 
         capability = torch.cuda.get_device_capability()
         capability = capability[0] * 10 + capability[1]