From b47a739880b5240e1930e3bfe57cf8ab6ba234c2 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 23 Oct 2024 20:12:38 +0800 Subject: [PATCH] Update vllm/model_executor/models/qwen2_vl.py --- vllm/model_executor/models/qwen2_vl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 446d2d87fcc4f..3dc955b12ba0e 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -777,8 +777,8 @@ def _expand_pad_tokens(inputs: list, token_id: int, make_batched_fn: Callable, data_type_key (str): The type of the multi-modal input. image_processor (Any): The image processor used to process the inputs. prompt_token_ids (List[int]): The list of token IDs in the prompt. - min_pixels int: min pixels to used for img processing - max_pixels int: max pixels to be used for img processing + min_pixels (int): min pixels to be used for img processing + max_pixels (int): max pixels to be used for img processing Returns: List[int]: The list of token IDs for the multi-modal inputs.