[Misc][VLM][Doc] Consolidate offline examples for vision language models (vllm-project#6858)

Co-authored-by: Cyrus Leung <[email protected]>
ywang96 and DarkLight1337 authored Jul 27, 2024
1 parent 0a06e4c commit ab4258e
Showing 7 changed files with 174 additions and 212 deletions.
31 changes: 0 additions & 31 deletions examples/fuyu_example.py

This file was deleted.

25 changes: 0 additions & 25 deletions examples/llava_example.py

This file was deleted.

36 changes: 0 additions & 36 deletions examples/llava_next_example.py

This file was deleted.

55 changes: 0 additions & 55 deletions examples/minicpmv_example.py

This file was deleted.

174 changes: 174 additions & 0 deletions examples/offline_inference_vision_language.py
@@ -0,0 +1,174 @@
"""
This example shows how to use vLLM for running offline inference
with the correct prompt format on vision language models.
For most models, the prompt format should follow corresponding examples
on HuggingFace model repository.
"""
from transformers import AutoTokenizer

from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset
from vllm.utils import FlexibleArgumentParser

# Input image and question
image = ImageAsset("cherry_blossom").pil_image.convert("RGB")
question = "What is the content of this image?"
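
# Each run_* helper below configures the engine and builds the prompt for
# one model family, returning an (llm, prompt) pair for main() to consume.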


# LLaVA-1.5
def run_llava(question):

    prompt = f"USER: <image>\n{question}\nASSISTANT:"

    llm = LLM(model="llava-hf/llava-1.5-7b-hf")

    return llm, prompt


# LLaVA-1.6/LLaVA-NeXT
def run_llava_next(question):

    prompt = f"[INST] <image>\n{question} [/INST]"
    llm = LLM(model="llava-hf/llava-v1.6-mistral-7b-hf")

    return llm, prompt


# Fuyu
def run_fuyu(question):

    prompt = f"{question}\n"
    llm = LLM(model="adept/fuyu-8b")

    return llm, prompt
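# Note that Fuyu's prompt contains no explicit image placeholder; the image
# is supplied separately via multi_modal_data in main().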


# Phi-3-Vision
def run_phi3v(question):

    prompt = f"<|user|>\n<|image_1|>\n{question}<|end|>\n<|assistant|>\n"  # noqa: E501
    # Note: The default settings of max_num_seqs (256) and
    # max_model_len (128k) for this model may cause OOM.
    # You may lower either to run this example on lower-end GPUs.

    # In this example, we override max_num_seqs to 5 while
    # keeping the original context length of 128k.
    llm = LLM(
        model="microsoft/Phi-3-vision-128k-instruct",
        trust_remote_code=True,
        max_num_seqs=5,
    )
    return llm, prompt


# PaliGemma
def run_paligemma(question):

    prompt = question
    llm = LLM(model="google/paligemma-3b-mix-224")

    return llm, prompt


# Chameleon
def run_chameleon(question):

    prompt = f"{question}<image>"
    llm = LLM(model="facebook/chameleon-7b")
    return llm, prompt
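# Unlike the LLaVA-style prompts above, Chameleon places the <image> token
# after the question rather than before it.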


# MiniCPM-V
def run_minicpmv(question):

    # 2.0
    # The official repo doesn't work yet, so we need to use a fork for now.
    # For more details, please see:
    # https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630
    # model_name = "HwwwH/MiniCPM-V-2"

    # 2.5
    model_name = "openbmb/MiniCPM-Llama3-V-2_5"
    tokenizer = AutoTokenizer.from_pretrained(model_name,
                                              trust_remote_code=True)
    llm = LLM(
        model=model_name,
        trust_remote_code=True,
    )

    # MiniCPM-V expects its own chat template to be applied to the prompt.
    messages = [{
        'role': 'user',
        'content': f'(<image>./</image>)\n{question}'
    }]
    prompt = tokenizer.apply_chat_template(messages,
                                           tokenize=False,
                                           add_generation_prompt=True)
    return llm, prompt


model_example_map = {
    "llava": run_llava,
    "llava-next": run_llava_next,
    "fuyu": run_fuyu,
    "phi3_v": run_phi3v,
    "paligemma": run_paligemma,
    "chameleon": run_chameleon,
    "minicpmv": run_minicpmv,
}
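# These keys double as the valid --model-type choices parsed below.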


def main(args):
    model = args.model_type
    if model not in model_example_map:
        raise ValueError(f"Model type {model} is not supported.")

    llm, prompt = model_example_map[model](question)

    # Set the temperature to 0.2 so that sampled outputs can differ even
    # when all prompts are identical during batch inference.
    sampling_params = SamplingParams(temperature=0.2, max_tokens=64)

    assert args.num_prompts > 0
    if args.num_prompts == 1:
        # Single inference
        inputs = {
            "prompt": prompt,
            "multi_modal_data": {
                "image": image
            },
        }

    else:
        # Batch inference
        inputs = [{
            "prompt": prompt,
            "multi_modal_data": {
                "image": image
            },
        } for _ in range(args.num_prompts)]

    outputs = llm.generate(inputs, sampling_params=sampling_params)

    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)


if __name__ == "__main__":
    parser = FlexibleArgumentParser(
        description='Demo on using vLLM for offline inference with '
        'vision language models')
    parser.add_argument('--model-type',
                        '-m',
                        type=str,
                        default="llava",
                        choices=model_example_map.keys(),
                        help='Huggingface "model_type".')
    parser.add_argument('--num-prompts',
                        type=int,
                        default=1,
                        help='Number of prompts to run.')

    args = parser.parse_args()
    main(args)
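
The consolidated script can then be run as follows (illustrative invocations
derived from the arguments defined above):

python examples/offline_inference_vision_language.py --model-type llava
python examples/offline_inference_vision_language.py -m phi3_v --num-prompts 4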
25 changes: 0 additions & 25 deletions examples/paligemma_example.py

This file was deleted.

40 changes: 0 additions & 40 deletions examples/phi3v_example.py

This file was deleted.
