From cde8b15f085975662ed2177cc2193f91815e4c7c Mon Sep 17 00:00:00 2001
From: charlifu
Date: Thu, 13 Jun 2024 21:25:22 +0000
Subject: [PATCH] remove comment

---
 vllm/model_executor/layers/quantization/fp8_rocm.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/vllm/model_executor/layers/quantization/fp8_rocm.py b/vllm/model_executor/layers/quantization/fp8_rocm.py
index 0f0a77783233d..5401df72fb67a 100644
--- a/vllm/model_executor/layers/quantization/fp8_rocm.py
+++ b/vllm/model_executor/layers/quantization/fp8_rocm.py
@@ -24,10 +24,8 @@ class Fp8RocmConfig(QuantizationConfig):
 
     def __init__(self) -> None:
-        # self.quantized_weights_path = config["quantized_weights"]
         self._tuned = {}
         gemm_type = os.getenv("FP8_GEMM", "fp8_16")
-        #print(f"Integral Cross factor = {self.factor}")
         if gemm_type == "fp8_8":
             self.gemm_method = Fp8RocmLinearMethod.apply_fp8_8
             tuned_filename = "/tmp/tuned_fp8_8.csv"