From 886cb941ffb7c3412f02011c319e7b3cb68cccea Mon Sep 17 00:00:00 2001
From: Roger Wang
Date: Wed, 26 Jun 2024 17:11:25 -0700
Subject: [PATCH] format

---
 examples/llava_next_example.py | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/examples/llava_next_example.py b/examples/llava_next_example.py
index 7acf374968d2c..e90a86abe41cb 100644
--- a/examples/llava_next_example.py
+++ b/examples/llava_next_example.py
@@ -1,16 +1,14 @@
-import requests
 from io import BytesIO
 
+import requests
 from PIL import Image
 
-from vllm import LLM
+from vllm import LLM, SamplingParams
 from vllm.multimodal.image import ImagePixelData
 
-from vllm import SamplingParams
-
-# Dynamic image input is currently not supported and therefore 
+# Dynamic image input is currently not supported and therefore
 # a fixed image input shape and its corresponding feature size is required.
-# See https://github.com/vllm-project/vllm/pull/4199 for the complete 
+# See https://github.com/vllm-project/vllm/pull/4199 for the complete
 # configuration matrix.
 
 llm = LLM(
@@ -26,13 +24,15 @@
 image = Image.open(BytesIO(requests.get(url).content))
 
 sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=100)
-outputs = llm.generate({
-    "prompt": prompt,
-    "multi_modal_data": ImagePixelData(image),
-}, sampling_params=sampling_params)
+outputs = llm.generate(
+    {
+        "prompt": prompt,
+        "multi_modal_data": ImagePixelData(image),
+    },
+    sampling_params=sampling_params)
 
 generated_text = ""
 for o in outputs:
     generated_text += o.outputs[0].text
-
+
 print(f"LLM output:{generated_text}")
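
For reference, below is a sketch of how the example reads once this patch is applied. Only the import block and the `llm.generate` call are taken verbatim from the hunks above; the `LLM(...)` constructor arguments, the prompt, and the image URL sit outside the diff context, so the values shown for them here are illustrative assumptions, not part of this change.

from io import BytesIO

import requests
from PIL import Image

from vllm import LLM, SamplingParams
from vllm.multimodal.image import ImagePixelData

# A fixed image-input shape and feature size are required (see the
# configuration matrix in https://github.com/vllm-project/vllm/pull/4199).
# NOTE: these constructor values are assumptions -- they fall outside the
# hunks of this patch.
llm = LLM(
    model="llava-hf/llava-v1.6-mistral-7b-hf",
    image_input_type="pixel_values",
    image_token_id=32000,
    image_input_shape="1,3,336,336",
    image_feature_size=1176,
)

# Placeholder prompt and image URL (also outside the diff context); the
# "<image>" tokens must match the configured feature size.
prompt = "[INST] " + "<image>" * 1176 + "\nWhat is shown in this image? [/INST]"
url = "https://example.com/some_image.jpg"
image = Image.open(BytesIO(requests.get(url).content))

sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=100)

# The reformatted call from this patch: a single dict input carrying both
# the text prompt and the image pixel data.
outputs = llm.generate(
    {
        "prompt": prompt,
        "multi_modal_data": ImagePixelData(image),
    },
    sampling_params=sampling_params)

generated_text = ""
for o in outputs:
    generated_text += o.outputs[0].text

print(f"LLM output:{generated_text}")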