Skip to content

Commit

Permalink
format
Browse files Browse the repository at this point in the history
  • Loading branch information
ywang96 committed Jun 27, 2024
1 parent 60f22a2 commit 886cb94
Showing 1 changed file with 11 additions and 11 deletions.
22 changes: 11 additions & 11 deletions examples/llava_next_example.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,14 @@
import requests
from io import BytesIO

import requests
from PIL import Image

from vllm import LLM
from vllm import LLM, SamplingParams
from vllm.multimodal.image import ImagePixelData
from vllm import SamplingParams


# Dynamic image input is currently not supported and therefore
# Dynamic image input is currently not supported and therefore
# a fixed image input shape and its corresponding feature size is required.
# See https://github.com/vllm-project/vllm/pull/4199 for the complete
# See https://github.com/vllm-project/vllm/pull/4199 for the complete
# configuration matrix.

llm = LLM(
Expand All @@ -26,13 +24,15 @@
# Download the example image over HTTP and decode it with PIL.
response = requests.get(url)
image = Image.open(BytesIO(response.content))

# Sampling configuration used for this generation request.
sampling_params = SamplingParams(temperature=0.8,
                                 top_p=0.95,
                                 max_tokens=100)

outputs = llm.generate({
"prompt": prompt,
"multi_modal_data": ImagePixelData(image),
}, sampling_params=sampling_params)
# Run offline inference: the request dict pairs the text prompt with the
# pre-processed image pixels as multi-modal input.
# NOTE(review): `llm`, `prompt`, and `sampling_params` are defined earlier
# in the script (the LLM(...) construction is collapsed in this view).
outputs = llm.generate(
    {
        "prompt": prompt,
        "multi_modal_data": ImagePixelData(image),
    },
    sampling_params=sampling_params)

# Concatenate the first candidate completion from every request output.
generated_text = "".join(o.outputs[0].text for o in outputs)

print(f"LLM output:{generated_text}")

0 comments on commit 886cb94

Please sign in to comment.