[Model] Initialize Fuyu-8B support #3924

Merged: 49 commits, Jul 14, 2024 (diff below shows changes from 41 commits)

Commits:
89f97ba  support persimmon model (Isotr0py, Apr 8, 2024)
f51e0a4  optimize persimmon code (Isotr0py, Apr 9, 2024)
09d126e  make ruff happy (Isotr0py, Apr 9, 2024)
07592ae  Add Fuyu support (Isotr0py, Apr 10, 2024)
3a24825  Merge branch 'vllm-project:main' into fuyu (Isotr0py, Apr 10, 2024)
28d4707  Merge branch 'main' into fuyu (Isotr0py, Apr 17, 2024)
9e4f994  Fix model loader (Isotr0py, Apr 17, 2024)
061e812  Fix several typos (Isotr0py, Apr 17, 2024)
d72810d  fix loader and add example (Isotr0py, Apr 17, 2024)
222124f  Add fuyu example (Isotr0py, Apr 17, 2024)
9f190cc  Reformat code (Isotr0py, Apr 17, 2024)
ca8ddc4  Fix a typo in fuyu example (Isotr0py, Apr 17, 2024)
bf4def6  rmove fuyu from vision config loading (Isotr0py, Apr 17, 2024)
ef666cd  Make image_input_shape and image_feature_size (Isotr0py, Apr 17, 2024)
5815536  format code (Isotr0py, Apr 17, 2024)
9fce2f0  Fix isort (Isotr0py, Apr 17, 2024)
65cfc08  expand vision_language_config assertion (Isotr0py, Apr 18, 2024)
260ab10  Merge branch 'main' into fuyu (Isotr0py, Apr 18, 2024)
5da3a32  Merge remote-tracking branch 'upstream/main' into fuyu (Isotr0py, Jun 25, 2024)
dd28d7d  Merge branch 'main' into fuyu (Isotr0py, Jun 25, 2024)
3e55b0a  revert arg_utils mistake change (Isotr0py, Jun 25, 2024)
e5a6418  rebase fuyu (Isotr0py, Jun 25, 2024)
5454031  fix fuyu dynamic shape (Isotr0py, Jun 25, 2024)
7b64800  add fuyu test (Isotr0py, Jun 26, 2024)
1f9c405  Merge branch 'vllm-project:main' into fuyu (Isotr0py, Jun 26, 2024)
de57a56  fix fuyu processor (Isotr0py, Jun 26, 2024)
cefa770  debug persimmon (Isotr0py, Jun 26, 2024)
867cb36  fix fuyu test (Isotr0py, Jun 27, 2024)
b8e8004  Merge branch 'vllm-project:main' into fuyu (Isotr0py, Jun 28, 2024)
c6e779c  update model flag (Isotr0py, Jun 28, 2024)
4134084  fix wrong used RowParallelLinear (Isotr0py, Jun 28, 2024)
dc31551  Merge branch 'vllm-project:main' into fuyu (Isotr0py, Jul 5, 2024)
018336f  refactor fuyu (Isotr0py, Jul 6, 2024)
a006ac1  Merge branch 'main' into fuyu (Isotr0py, Jul 7, 2024)
c788ceb  fix fuyu input processor (Isotr0py, Jul 7, 2024)
71a38c3  refactor fuyu test (Isotr0py, Jul 7, 2024)
8a32199  fix fuyu test (Isotr0py, Jul 7, 2024)
b346bf2  reduce image size_factors (Isotr0py, Jul 7, 2024)
3b6e411  reduce scale factors (Isotr0py, Jul 7, 2024)
b7070c4  reduce size_factors (Isotr0py, Jul 7, 2024)
c55ff87  Merge branch 'vllm-project:main' into fuyu (Isotr0py, Jul 8, 2024)
71166ee  add monkey-patch for fuyu test (Isotr0py, Jul 8, 2024)
3a21aab  use shared merge_vision_embeddings and add dim check (Isotr0py, Jul 8, 2024)
6cff87a  reduce max_model_len (Isotr0py, Jul 8, 2024)
f806004  reduce size_factors (Isotr0py, Jul 8, 2024)
50e9c2d  add vllm_to_hf_output (Isotr0py, Jul 8, 2024)
bf89017  fix vllm_to_hf_output (Isotr0py, Jul 8, 2024)
004b0c0  Merge remote-tracking branch 'upstream/main' into fuyu (ywang96, Jul 13, 2024)
b69ce43  Merge remote-tracking branch 'upstream/main' into fuyu (ywang96, Jul 14, 2024)
8 changes: 8 additions & 0 deletions docs/source/models/supported_models.rst
@@ -137,6 +137,10 @@ Decoder-only Language Models
- Phi-3-Small
- :code:`microsoft/Phi-3-small-8k-instruct`, :code:`microsoft/Phi-3-small-128k-instruct`, etc.
-
* - :code:`PersimmonForCausalLM`
- Persimmon
- :code:`adept/persimmon-8b-base`, :code:`adept/persimmon-8b-chat`, etc.
-
* - :code:`QWenLMHeadModel`
- Qwen
- :code:`Qwen/Qwen-7B`, :code:`Qwen/Qwen-7B-Chat`, etc.
@@ -178,6 +182,10 @@ Vision Language Models
- Models
- Example HuggingFace Models
- :ref:`LoRA <lora>`
* - :code:`FuyuForCausalLM`
- Fuyu
- :code:`adept/fuyu-8b` etc.
-
* - :code:`LlavaForConditionalGeneration`
- LLaVA-1.5
- :code:`llava-hf/llava-1.5-7b-hf`, :code:`llava-hf/llava-1.5-13b-hf`, etc.
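
For reference, a minimal sketch of how one of the newly documented models could be invoked through vLLM's Python API. The model name is taken from the table above; the prompt and sampling settings are illustrative assumptions, not part of this PR:

from vllm import LLM, SamplingParams

# Sketch only: load the newly documented Persimmon chat model.
# The prompt and sampling settings below are illustrative assumptions.
llm = LLM(model="adept/persimmon-8b-chat")
sampling_params = SamplingParams(temperature=0, max_tokens=32)
outputs = llm.generate(["What is a persimmon?"], sampling_params)
print(outputs[0].outputs[0].text)
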
31 changes: 31 additions & 0 deletions examples/fuyu_example.py
@@ -0,0 +1,31 @@
import requests
from PIL import Image

from vllm import LLM, SamplingParams


def run_fuyu():
    llm = LLM(model="adept/fuyu-8b", max_model_len=4096)

    # single-image prompt
    prompt = "What is the highest life expectancy at of male?\n"
    url = "https://huggingface.co/adept/fuyu-8b/resolve/main/chart.png"
    image = Image.open(requests.get(url, stream=True).raw)
    sampling_params = SamplingParams(temperature=0, max_tokens=64)

    outputs = llm.generate(
        {
            "prompt": prompt,
            "multi_modal_data": {
                "image": image
            },
        },
        sampling_params=sampling_params)

    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)


if __name__ == "__main__":
    run_fuyu()
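
The dict-based interface used above also accepts a list of inputs, so the example extends naturally to batched generation. A hedged sketch, reusing llm, image, and sampling_params from run_fuyu() above (the second prompt is a hypothetical addition, not from this PR):

# Sketch only: batched multi-modal generation with the same interface.
# Reuses llm, image, and sampling_params from run_fuyu(); the second
# prompt is hypothetical.
batch = [
    {"prompt": "What is the highest life expectancy at of male?\n",
     "multi_modal_data": {"image": image}},
    {"prompt": "Describe this chart in one sentence.\n",
     "multi_modal_data": {"image": image}},
]
outputs = llm.generate(batch, sampling_params=sampling_params)
for o in outputs:
    print(o.outputs[0].text)
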
131 changes: 131 additions & 0 deletions tests/models/test_fuyu.py
@@ -0,0 +1,131 @@
from typing import List, Optional, Tuple, Type

import pytest

from vllm.multimodal.utils import rescale_image_size
from vllm.utils import is_cpu

from ..conftest import IMAGE_ASSETS, HfRunner, VllmRunner, _ImageAssets
from .utils import check_outputs_equal

pytestmark = pytest.mark.vlm

HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
    "stop_sign": "What color is the stop sign?\n",  # noqa: E501
    "cherry_blossom": "What is the season?\n",
    "boardwalk": "What's in this image?\n",
})

models = ["adept/fuyu-8b"]


def hf_to_vllm_output(hf_output: Tuple[List[int], str]):
    """remove image placeholder from prompts"""
    hf_output_ids, hf_output_str = hf_output
    hf_output_str = hf_output_str.split("<s> ")[1]
    return hf_output_ids, hf_output_str


# FIXME: Fuyu in transformers misses the get_output_embeddings method,
# which breaks generate_greedy_logprobs for hf_runner.
# Use check_outputs_equal to pass the test temporarily until the issue is fixed.
def run_test(
    hf_runner: Type[HfRunner],
    vllm_runner: Type[VllmRunner],
    image_assets: _ImageAssets,
    model: str,
    *,
    size_factors: List[float],
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
    """Inference results should be the same between hf and vllm.

    All the image fixtures for the test are under tests/images.
    For the huggingface runner, we provide the PIL images as input.
    For the vllm runner, we provide MultiModalDataDict objects
    and the corresponding vision language config as input.
    Note that the text input is also adjusted to abide by the vllm contract.
    The text output is sanitized so that it can be compared with hf.
    """
    images = [asset.pil_image for asset in image_assets]

    inputs_per_image = [(
        [prompt for _ in size_factors],
        [rescale_image_size(image, factor) for factor in size_factors],
    ) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]

    # NOTE: take care of the order: run vLLM first, and then run HF.
    # vLLM needs a fresh new process without CUDA initialization.
    # If we run HF first, CUDA will already be initialized, which breaks
    # the multiprocessing backend with the fork method (the default).

    # max_model_len should be greater than image_feature_size
    with vllm_runner(model,
                     max_model_len=2560,
                     max_num_seqs=1,
                     dtype=dtype,
                     tensor_parallel_size=tensor_parallel_size,
                     distributed_executor_backend=distributed_executor_backend,
                     enforce_eager=True) as vllm_model:
        vllm_outputs_per_image = [
            vllm_model.generate_greedy(prompts, max_tokens, images=vllm_images)
            for prompts, vllm_images in inputs_per_image
        ]

    with hf_runner(model, dtype=dtype) as hf_model:
        hf_outputs_per_image = [
            hf_model.generate_greedy(prompts, max_tokens, images=hf_images)
            for prompts, hf_images in inputs_per_image
        ]

    for hf_outputs, vllm_outputs in zip(hf_outputs_per_image,
                                        vllm_outputs_per_image):
        check_outputs_equal(
            outputs_0_lst=[
                hf_to_vllm_output(hf_output) for hf_output in hf_outputs
            ],
            outputs_1_lst=vllm_outputs,
            name_0="hf",
            name_1="vllm",
        )


target_dtype = "half"
if is_cpu():
    target_dtype = "bfloat16"


@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [0.25],
        # Single-scale, batched
        [0.25, 0.25, 0.25],
        # Multi-scale
        [0.25, 0.20, 0.15],
    ],
)
@pytest.mark.parametrize("dtype", [target_dtype])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [10])
def test_models(hf_runner, vllm_runner, image_assets, model, size_factors,
                dtype: str, max_tokens: int, num_logprobs: int) -> None:
    run_test(
        hf_runner,
        vllm_runner,
        image_assets,
        model,
        size_factors=size_factors,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        tensor_parallel_size=1,
    )
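
Assuming the repository's usual pytest setup, the suite above should be runnable with pytest tests/models/test_fuyu.py; the vlm marker applied via pytestmark lets CI select or deselect these vision-language tests as a group.
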
2 changes: 2 additions & 0 deletions vllm/model_executor/models/__init__.py
@@ -23,6 +23,7 @@
"DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
"DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"),
"FalconForCausalLM": ("falcon", "FalconForCausalLM"),
"FuyuForCausalLM": ("fuyu", "FuyuForCausalLM"),
"GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
"Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"),
"GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
@@ -49,6 +50,7 @@
"OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
"OPTForCausalLM": ("opt", "OPTForCausalLM"),
"OrionForCausalLM": ("orion", "OrionForCausalLM"),
"PersimmonForCausalLM": ("persimmon", "PersimmonForCausalLM"),
"PaliGemmaForConditionalGeneration":
("paligemma", "PaliGemmaForConditionalGeneration"),
"PhiForCausalLM": ("phi", "PhiForCausalLM"),
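
For context, a sketch of how an architecture-to-module mapping like the one above can be resolved lazily at load time. The helper below is illustrative only and is not vLLM's actual loader code:

import importlib
from typing import Tuple, Type

def resolve_model_cls(entry: Tuple[str, str]) -> Type:
    """Illustrative resolver (assumption: not vLLM's real loader).

    Maps a registry entry such as ("fuyu", "FuyuForCausalLM") to its
    implementation class via a lazy import, so unused model modules
    are never imported.
    """
    module_name, cls_name = entry
    module = importlib.import_module(
        f"vllm.model_executor.models.{module_name}")
    return getattr(module, cls_name)

# e.g. resolve_model_cls(("fuyu", "FuyuForCausalLM")) -> FuyuForCausalLM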