Fix Phi3V prompt replacement
Signed-off-by: DarkLight1337 <[email protected]>
DarkLight1337 committed Dec 20, 2024
1 parent b981a9d commit b2dac49
Showing 1 changed file with 6 additions and 2 deletions.
8 changes: 6 additions & 2 deletions vllm/multimodal/processing.py
@@ -918,6 +918,10 @@ def _apply_prompt_replacements(
         tokenizer = self._get_tokenizer()
 
         token_matches = find_token_matches(token_ids, prompt_repls)
+        match_count_by_modality = {
+            modality: len(matches)
+            for modality, matches in full_groupby_modality(token_matches)
+        }
 
         # If the search text does not represent a special token,
         # it may have different token IDs in the prompt, because
@@ -930,8 +934,8 @@ def _apply_prompt_replacements(
         # of the search text in the prompt, we instead perform string
         # replacement on the decoded token IDs, then encode them back.
         if all(
-            len(matches) >= mm_item_counts[modality]
-            for modality, matches in full_groupby_modality(token_matches)
+            match_count_by_modality.get(modality, 0) >= item_count
+            for modality, item_count in mm_item_counts.items()
         ):  # yapf: disable
             token_ids = replace_token_matches(
                 token_ids,
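The gist of the fix: the old guard iterated over full_groupby_modality(token_matches), which only yields modalities that had at least one token-level match. A modality whose placeholder never matched at the token level (presumably the Phi-3-Vision case named in the commit title) produced no group at all, so all(...) passed vacuously and the string-level fallback never ran. The new guard iterates over mm_item_counts instead, treating a missing modality as zero matches. Below is a minimal standalone sketch of the two predicates, with plain dicts standing in for vLLM's match structures; the scenario values are illustrative, not taken from the commit:

# Standalone sketch (not vLLM code): one image item in the prompt,
# but no token-level matches were found for its placeholder.
mm_item_counts = {"image": 1}
match_count_by_modality = {}  # full_groupby_modality() yielded nothing

# Old predicate: loops over modalities that HAVE matches, so an empty
# match set makes all() vacuously True and skips the text fallback.
old_ok = all(
    count >= mm_item_counts[modality]
    for modality, count in match_count_by_modality.items()
)

# New predicate: loops over the modalities that NEED replacement,
# defaulting a missing modality to zero matches.
new_ok = all(
    match_count_by_modality.get(modality, 0) >= item_count
    for modality, item_count in mm_item_counts.items()
)

assert old_ok is True   # bug: token replacement wrongly declared sufficient
assert new_ok is False  # fix: falls back to string-level replacement

Driving the check from mm_item_counts rather than from the matches is the whole fix: it turns "no matches at all" into a failing case instead of an invisible one.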
