From 91c9ebbb1bfc39e98aa2bd444b9569e5f2f92c9e Mon Sep 17 00:00:00 2001
From: Robert Shaw <114415538+robertgshaw2-neuralmagic@users.noreply.github.com>
Date: Sun, 3 Nov 2024 19:24:40 -0500
Subject: [PATCH] [V1] Fix Configs (#9971)

---
 vllm/v1/executor/gpu_executor.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/vllm/v1/executor/gpu_executor.py b/vllm/v1/executor/gpu_executor.py
index de56332240192..f71fa16b16e27 100644
--- a/vllm/v1/executor/gpu_executor.py
+++ b/vllm/v1/executor/gpu_executor.py
@@ -1,7 +1,7 @@
 import os
 from typing import Optional, Tuple
 
-from vllm.config import EngineConfig
+from vllm.config import VllmConfig
 from vllm.logger import init_logger
 from vllm.utils import get_distributed_init_method, get_ip, get_open_port
 from vllm.v1.outputs import ModelRunnerOutput
@@ -12,7 +12,8 @@
 
 class GPUExecutor:
 
-    def __init__(self, vllm_config: EngineConfig) -> None:
+    def __init__(self, vllm_config: VllmConfig) -> None:
+        self.vllm_config = vllm_config
         self.model_config = vllm_config.model_config
         self.cache_config = vllm_config.cache_config
         self.lora_config = vllm_config.lora_config
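
The patch retypes the GPUExecutor constructor to take the top-level VllmConfig and keeps a reference to it alongside the unpacked sub-configs. Below is a minimal sketch of how the updated constructor would be exercised; it assumes a CUDA-capable machine and that EngineArgs.create_engine_config() produces a VllmConfig at this revision, neither of which is part of the patch itself.

    # Hedged usage sketch (not from the patch): build a VllmConfig via
    # EngineArgs, hand it to the V1 GPUExecutor, and confirm that the
    # stored vllm_config and the unpacked sub-configs stay in sync.
    from vllm.engine.arg_utils import EngineArgs
    from vllm.v1.executor.gpu_executor import GPUExecutor

    # Model name is illustrative; any model supported by vLLM would do.
    vllm_config = EngineArgs(model="facebook/opt-125m").create_engine_config()
    executor = GPUExecutor(vllm_config)

    # After this change the executor exposes both the full config and the
    # per-component configs, and they refer to the same underlying objects.
    assert executor.vllm_config is vllm_config
    assert executor.vllm_config.model_config is executor.model_config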