From ba4be44c32761d30f1e17656b863d2cc078af9e4 Mon Sep 17 00:00:00 2001
From: Nick Hill
Date: Sat, 27 Apr 2024 11:17:45 -0700
Subject: [PATCH] [BugFix] Fix return type of executor execute_model methods
 (#4402)

---
 vllm/executor/cpu_executor.py             | 2 +-
 vllm/executor/distributed_gpu_executor.py | 7 ++++---
 vllm/executor/executor_base.py            | 2 +-
 vllm/executor/gpu_executor.py             | 2 +-
 vllm/executor/neuron_executor.py          | 2 +-
 vllm/executor/ray_gpu_executor.py         | 2 +-
 6 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py
index aa810f9743395..e4436b2144bd3 100644
--- a/vllm/executor/cpu_executor.py
+++ b/vllm/executor/cpu_executor.py
@@ -109,7 +109,7 @@ async def execute_model_async(
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         output = await make_async(self.driver_worker.execute_model)(
             seq_group_metadata_list=seq_group_metadata_list,
             blocks_to_swap_in=blocks_to_swap_in,
diff --git a/vllm/executor/distributed_gpu_executor.py b/vllm/executor/distributed_gpu_executor.py
index 9dccfa4946391..4c922ef63ee04 100644
--- a/vllm/executor/distributed_gpu_executor.py
+++ b/vllm/executor/distributed_gpu_executor.py
@@ -1,5 +1,5 @@
 from abc import abstractmethod
-from typing import Any, Dict, Optional, Set, Tuple
+from typing import Any, Dict, List, Optional, Set, Tuple

 from vllm.executor.executor_base import ExecutorAsyncBase
 from vllm.executor.gpu_executor import GPUExecutor
@@ -52,7 +52,7 @@ def initialize_cache(self, num_gpu_blocks: int,
                           num_gpu_blocks=num_gpu_blocks,
                           num_cpu_blocks=num_cpu_blocks)

-    def execute_model(self, *args, **kwargs) -> SamplerOutput:
+    def execute_model(self, *args, **kwargs) -> List[SamplerOutput]:
         all_outputs = self._run_workers("execute_model",
                                         driver_args=args,
                                         driver_kwargs=kwargs)
@@ -105,7 +105,8 @@ async def _run_workers_async(
         """Runs the given method on all workers."""
         raise NotImplementedError

-    async def execute_model_async(self, *args, **kwargs) -> SamplerOutput:
+    async def execute_model_async(self, *args,
+                                  **kwargs) -> List[SamplerOutput]:
         all_outputs = await self._run_workers_async("execute_model",
                                                     driver_args=args,
                                                     driver_kwargs=kwargs)
diff --git a/vllm/executor/executor_base.py b/vllm/executor/executor_base.py
index 1838c34be2fda..c36aa18fb25bb 100644
--- a/vllm/executor/executor_base.py
+++ b/vllm/executor/executor_base.py
@@ -112,7 +112,7 @@ async def execute_model_async(
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         """Executes one model step on the given sequences."""
         raise NotImplementedError

diff --git a/vllm/executor/gpu_executor.py b/vllm/executor/gpu_executor.py
index d2c60a3b68e14..5ac62f02b99c7 100644
--- a/vllm/executor/gpu_executor.py
+++ b/vllm/executor/gpu_executor.py
@@ -163,7 +163,7 @@ async def execute_model_async(
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         output = await make_async(self.driver_worker.execute_model)(
             seq_group_metadata_list=seq_group_metadata_list,
             blocks_to_swap_in=blocks_to_swap_in,
diff --git a/vllm/executor/neuron_executor.py b/vllm/executor/neuron_executor.py
index 5a137d1bdcb3b..f406287f3c1d8 100644
--- a/vllm/executor/neuron_executor.py
+++ b/vllm/executor/neuron_executor.py
@@ -84,7 +84,7 @@ async def execute_model_async(
         blocks_to_swap_in: Dict[int, int],
         blocks_to_swap_out: Dict[int, int],
         blocks_to_copy: Dict[int, List[int]],
-    ) -> SamplerOutput:
+    ) -> List[SamplerOutput]:
         output = await make_async(self.driver_worker.execute_model)(
             seq_group_metadata_list=seq_group_metadata_list, )
         return output
diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py
index 1082984828357..b6bcda4e6b18c 100644
--- a/vllm/executor/ray_gpu_executor.py
+++ b/vllm/executor/ray_gpu_executor.py
@@ -188,7 +188,7 @@ def execute_model(self,
                       blocks_to_swap_in: Dict[int, int],
                       blocks_to_swap_out: Dict[int, int],
                       blocks_to_copy: Dict[int, List[int]],
-                      num_lookahead_slots: int = 0) -> SamplerOutput:
+                      num_lookahead_slots: int = 0) -> List[SamplerOutput]:
         all_outputs = self._run_workers(
             "execute_model",
             driver_kwargs={
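
Note on the change (not part of the patch): every hunk is annotation-only; the executors already return whatever the driver worker's execute_model produces, which is a list of SamplerOutput objects, so only the declared return types were wrong. The sketch below is a hypothetical caller-side illustration of the typing the corrected signatures imply; the run_engine_step helper name is an assumption and does not exist in vLLM.

# Hypothetical sketch, assuming vLLM at this commit: SamplerOutput and
# SequenceGroupMetadata live in vllm.sequence, ExecutorBase in
# vllm.executor.executor_base. Not part of the patch.
from typing import Dict, List

from vllm.executor.executor_base import ExecutorBase
from vllm.sequence import SamplerOutput, SequenceGroupMetadata


def run_engine_step(  # illustrative helper, not vLLM code
        executor: ExecutorBase,
        seq_group_metadata_list: List[SequenceGroupMetadata],
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]]) -> List[SamplerOutput]:
    # After this patch, execute_model is annotated as List[SamplerOutput],
    # matching what the driver worker actually returns; callers index into
    # the list (e.g. outputs[0] for a single-step schedule) rather than
    # treating the result as a bare SamplerOutput.
    outputs = executor.execute_model(
        seq_group_metadata_list=seq_group_metadata_list,
        blocks_to_swap_in=blocks_to_swap_in,
        blocks_to_swap_out=blocks_to_swap_out,
        blocks_to_copy=blocks_to_copy)
    return outputs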