Add multimodal test for chat method
DarkLight1337 committed Sep 3, 2024
1 parent f74b4b0 commit 371e733
Showing 1 changed file with 34 additions and 0 deletions.
34 changes: 34 additions & 0 deletions tests/entrypoints/llm/test_generate.py
@@ -6,6 +6,7 @@
from vllm import LLM, RequestOutput, SamplingParams

from ...conftest import cleanup
from ..openai.test_vision import TEST_IMAGE_URLS

MODEL_NAME = "facebook/opt-125m"

@@ -159,3 +160,36 @@ def test_chat():
]
outputs = llm.chat(messages)
assert len(outputs) == 1


@pytest.mark.parametrize("image_urls",
                         [[TEST_IMAGE_URLS[0], TEST_IMAGE_URLS[1]]])
def test_chat_multi_image(image_urls: List[str]):
llm = LLM(
model="microsoft/Phi-3.5-vision-instruct",
dtype="bfloat16",
max_model_len=4096,
max_num_seqs=5,
enforce_eager=True,
trust_remote_code=True,
limit_mm_per_prompt={"image": 2},
)

messages = [{
"role":
"user",
"content": [
*({
"type": "image_url",
"image_url": {
"url": image_url
}
} for image_url in image_urls),
{
"type": "text",
"text": "What's in this image?"
},
],
}]
outputs = llm.chat(messages)
    assert len(outputs) == 1
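
For context (not part of the commit), a minimal standalone sketch of the message structure the test builds: the starred generator expression expands into one `image_url` content part per URL, followed by a single text part. The URLs below are placeholders used only for illustration.

```python
# Minimal sketch (no vLLM dependency): build an OpenAI-style multimodal
# message whose "content" holds one image_url part per URL plus a text part.
image_urls = [
    "https://example.com/cat.jpg",  # placeholder URLs for illustration
    "https://example.com/dog.jpg",
]

messages = [{
    "role": "user",
    "content": [
        *({"type": "image_url", "image_url": {"url": url}} for url in image_urls),
        {"type": "text", "text": "What's in this image?"},
    ],
}]

# The content list contains len(image_urls) image parts followed by the text part.
assert len(messages[0]["content"]) == len(image_urls) + 1
```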
