diff --git a/aphrodite/common/config.py b/aphrodite/common/config.py
index 4a490e4de..ce5e19afd 100644
--- a/aphrodite/common/config.py
+++ b/aphrodite/common/config.py
@@ -19,6 +19,7 @@
 from aphrodite.platforms import current_platform
 from aphrodite.quantization import QUANTIZATION_METHODS
 from aphrodite.transformers_utils.config import (ConfigFormat, get_config,
+                                                 get_hf_image_processor_config,
                                                  get_hf_text_config)
 from aphrodite.triton_utils import HAS_TRITON
@@ -203,6 +204,8 @@ def __init__(
                                     code_revision, rope_scaling, rope_theta, config_format)
         self.hf_text_config = get_hf_text_config(self.hf_config)
+        self.hf_image_processor_config = get_hf_image_processor_config(
+            self.model, revision)
         self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype)
 
         # Choose a default enforce_eager value if the user did not specify
diff --git a/aphrodite/inputs/registry.py b/aphrodite/inputs/registry.py
index 5f81c7031..e567b7bea 100644
--- a/aphrodite/inputs/registry.py
+++ b/aphrodite/inputs/registry.py
@@ -2,8 +2,8 @@
 from array import array
 from collections import UserDict
 from dataclasses import dataclass
-from typing import (TYPE_CHECKING, Callable, Dict, Mapping, Optional, Protocol,
-                    Tuple, Type)
+from typing import (TYPE_CHECKING, Any, Callable, Dict, Mapping, Optional,
+                    Protocol, Tuple, Type)
 
 from loguru import logger
 from torch import nn
@@ -49,6 +49,13 @@ def get_hf_config(self, hf_config_type: Type[C] = PretrainedConfig) -> C:
         return hf_config
 
+    def get_hf_image_processor_config(self) -> Dict[str, Any]:
+        """
+        Get the HuggingFace image processor configuration of the model.
+        """
+        return self.model_config.hf_image_processor_config
+
+
 N = TypeVar("N", bound=Type[nn.Module])
diff --git a/aphrodite/modeling/models/phi3v.py b/aphrodite/modeling/models/phi3v.py
index 7d47ae3a3..eeec38c4b 100644
--- a/aphrodite/modeling/models/phi3v.py
+++ b/aphrodite/modeling/models/phi3v.py
@@ -16,8 +16,8 @@
 # limitations under the License.
 import re
 from functools import lru_cache
-from typing import (Iterable, List, Literal, Mapping, Optional, Tuple,
-                    TypedDict, Union)
+from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional,
+                    Tuple, TypedDict, Union)
 
 import numpy as np
 import torch
@@ -320,12 +320,12 @@ def _calc_hd_transform_size(*, width: int, height: int, hd_num: int = 16):
 # Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L181
 def get_phi3v_image_feature_size(
-    hf_config: PretrainedConfig,
+    hf_config: Dict[str, Any],
     *,
     input_height: int,
     input_width: int,
 ) -> int:
-    num_crops = getattr(hf_config, "num_crops", 16)
+    num_crops = hf_config.get("num_crops", 16)
     new_width, new_height = _calc_hd_transform_size(width=input_width,
                                                     height=input_height,
                                                     hd_num=num_crops)
@@ -337,7 +337,7 @@ def get_phi3v_image_feature_size(
 
 def get_max_phi3v_image_tokens(ctx: InputContext):
     return get_phi3v_image_feature_size(
-        ctx.get_hf_config(PretrainedConfig),
+        ctx.get_hf_image_processor_config(),
         input_height=MAX_IMAGE_FEATURE_SIZE_HEIGHT,
         input_width=MAX_IMAGE_FEATURE_SIZE_WIDTH,
     )
@@ -391,7 +391,7 @@ def input_processor_for_phi3v(ctx: InputContext, llm_inputs: LLMInputs):
         return llm_inputs
 
     model_config = ctx.model_config
-    hf_config = ctx.get_hf_config(PretrainedConfig)
+    hf_config = ctx.get_hf_image_processor_config()
 
     image_data = multi_modal_data["image"]
     if isinstance(image_data, Image.Image):
diff --git a/aphrodite/transformers_utils/config.py b/aphrodite/transformers_utils/config.py
index 3a8d95186..83ff23f09 100644
--- a/aphrodite/transformers_utils/config.py
+++ b/aphrodite/transformers_utils/config.py
@@ -9,6 +9,8 @@
                              try_to_load_from_cache)
 from loguru import logger
 from transformers import GenerationConfig, PretrainedConfig
+from transformers.models.auto.image_processing_auto import (
+    get_image_processor_config)
 from transformers.models.auto.modeling_auto import (
     MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
 from transformers.utils import CONFIG_NAME as HF_CONFIG_NAME
@@ -243,6 +245,17 @@ def recurse_elems(elem: Any):
     return config
 
 
+def get_hf_image_processor_config(
+    model: Union[str, Path],
+    revision: Optional[str] = None,
+    **kwargs,
+) -> Dict[str, Any]:
+    # Separate model folder from file path for GGUF models
+    if Path(model).is_file() and Path(model).suffix == ".gguf":
+        model = Path(model).parent
+    return get_image_processor_config(model, revision=revision, **kwargs)
+
+
 def get_hf_text_config(config: PretrainedConfig):
     """Get the "sub" config relevant to llm for multi modal models.
     No op for pure text models.
diff --git a/examples/vision/vision_example.py b/examples/vision/vision_example.py
index df6e3c501..51646bdec 100644
--- a/examples/vision/vision_example.py
+++ b/examples/vision/vision_example.py
@@ -58,7 +58,7 @@ def run_phi3v(question):
     # In this example, we override max_num_seqs to 5 while
     # keeping the original context length of 128k.
     llm = LLM(
-        model="microsoft/Phi-3-vision-128k-instruct",
+        model="microsoft/Phi-3.5-vision-instruct",
         trust_remote_code=True,
         max_num_seqs=5,
     )
diff --git a/tests/models/test_phi3v.py b/tests/models/test_phi3v.py
index a55454b8a..9d496eb46 100644
--- a/tests/models/test_phi3v.py
+++ b/tests/models/test_phi3v.py
@@ -21,7 +21,7 @@
     "<|user|>\n<|image_1|>\nWhat is the season?<|end|>\n<|assistant|>\n",
 })
 
-models = ["microsoft/Phi-3-vision-128k-instruct"]
+models = ["microsoft/Phi-3.5-vision-instruct"]
 
 
 def aphrodite_to_hf_output(aphrodite_output: Tuple[List[int], str,
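A minimal sketch (not part of the diff) of the lookup pattern this change introduces: get_hf_image_processor_config() returns a plain dict (the contents of the model's image processor config), so callers such as get_phi3v_image_feature_size read values with dict.get() instead of getattr() on a PretrainedConfig. The read_num_crops helper and the sample dicts below are hypothetical, for illustration only.

# Hypothetical illustration of the dict-based lookup used in this diff;
# read_num_crops and the sample configs below are not part of the codebase.
from typing import Any, Dict


def read_num_crops(image_processor_config: Dict[str, Any]) -> int:
    # Mirrors get_phi3v_image_feature_size above: "num_crops" defaults to 16
    # when the image processor config does not define it.
    return image_processor_config.get("num_crops", 16)


if __name__ == "__main__":
    # In the real code the dict would come from
    # get_hf_image_processor_config(model, revision).
    print(read_num_crops({"num_crops": 4}))  # -> 4
    print(read_num_crops({}))                # -> 16 (default)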