From 06f53ba9be4abb25cbf614fde16b45ff8e14ca64 Mon Sep 17 00:00:00 2001
From: Matthew Wong
Date: Thu, 19 Dec 2024 23:21:33 +0000
Subject: [PATCH] Lint again, to fixed point

---
 vllm/model_executor/models/llama.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py
index b268375ce8a4b..ee8dc07a756b7 100644
--- a/vllm/model_executor/models/llama.py
+++ b/vllm/model_executor/models/llama.py
@@ -231,8 +231,8 @@ def forward(
                                 attn_metadata,
                                 fp8_comp_scales=(self.attn._q_scale,
                                                  self.attn._prob_scale,
-                                                 self.o_proj.input_scale
-                                                 if self.attn_fp8_out else None))
+                                                 self.o_proj.input_scale if
+                                                 self.attn_fp8_out else None))
 
         output, _ = self.o_proj(attn_output)
         return output