From d21df498360ae084eb5619113aadd638877ab9a6 Mon Sep 17 00:00:00 2001
From: Varun Sundar Rabindranath
Date: Fri, 6 Dec 2024 15:24:59 -0500
Subject: [PATCH] input_batch.py -> gpu_input_batch.py

Signed-off-by: Varun Sundar Rabindranath
---
 vllm/v1/worker/{input_batch.py => gpu_input_batch.py} | 0
 vllm/v1/worker/gpu_model_runner.py                    | 2 +-
 vllm/v1/worker/lora_model_runner_mixin.py             | 2 +-
 3 files changed, 2 insertions(+), 2 deletions(-)
 rename vllm/v1/worker/{input_batch.py => gpu_input_batch.py} (100%)

diff --git a/vllm/v1/worker/input_batch.py b/vllm/v1/worker/gpu_input_batch.py
similarity index 100%
rename from vllm/v1/worker/input_batch.py
rename to vllm/v1/worker/gpu_input_batch.py
diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py
index 0cf4ea31bce7e..6189e7bb7d8b3 100644
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -21,7 +21,7 @@
                                                    FlashAttentionMetadata)
 from vllm.v1.outputs import ModelRunnerOutput
 from vllm.v1.sample.metadata import SamplingMetadata
-from vllm.v1.worker.input_batch import CachedRequestState, InputBatch
+from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
 from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
 
 if TYPE_CHECKING:
diff --git a/vllm/v1/worker/lora_model_runner_mixin.py b/vllm/v1/worker/lora_model_runner_mixin.py
index a8e96859658f6..19156fd75e5e3 100644
--- a/vllm/v1/worker/lora_model_runner_mixin.py
+++ b/vllm/v1/worker/lora_model_runner_mixin.py
@@ -14,7 +14,7 @@
 from vllm.lora.request import LoRARequest
 from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
 from vllm.model_executor.models import supports_lora, supports_multimodal
-from vllm.v1.worker.input_batch import InputBatch
+from vllm.v1.worker.gpu_input_batch import InputBatch
 
 logger = init_logger(__name__)
 