Commit

fix mm
comaniac committed Jul 22, 2024
1 parent 4cc12ec commit fb00532
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions vllm/worker/model_runner.py
@@ -222,7 +222,7 @@ class InterDataForSeqGroup:
         prompt_adapter_request: Optional[PromptAdapterRequest] = None
 
         # Multi-modal inputs.
-        multi_modal_inputs_list: Optional[MultiModalInputs] = None
+        multi_modal_inputs: Optional[MultiModalInputs] = None
 
         # Whether the prefix cache is hit (prefill only).
         prefix_cache_hit: bool = False
@@ -439,7 +439,7 @@ def _compute_multi_modal_input(self, inter_data: InterDataForSeqGroup,
             return
 
         mm_kwargs = self.multi_modal_input_mapper(mm_data)
-        inter_data.multi_modal_inputs_list = mm_kwargs
+        inter_data.multi_modal_inputs = mm_kwargs
 
     def add_seq_group(self, seq_group_metadata: SequenceGroupMetadata):
         """Add a sequence group to the builder."""
@@ -565,8 +565,8 @@ def build(self) -> ModelInputForGPU:
 
         # Multi-modal data.
         multi_modal_inputs_list = [
-            m for data in self.inter_data_list
-            for m in [data.multi_modal_inputs_list] if m
+            data.multi_modal_inputs for data in self.inter_data_list
+            if data.multi_modal_inputs is not None
         ]
         multi_modal_kwargs = MultiModalInputs.batch(multi_modal_inputs_list,
                                                     device=self.runner.device)
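
For context, a minimal standalone sketch of what the corrected comprehension in build() does: it keeps one multi-modal kwargs entry per sequence group that actually carries multi-modal data and skips text-only groups before batching. This is illustrative only, not vLLM code; FakeInterData and the plain dicts are hypothetical stand-ins for InterDataForSeqGroup and the mapper's MultiModalInputs output.

# Illustrative sketch only (not vLLM code).
from typing import Optional


class FakeInterData:
    # Stand-in for InterDataForSeqGroup: holds the per-group
    # multi-modal kwargs, or None for a text-only sequence group.
    def __init__(self, multi_modal_inputs: Optional[dict] = None):
        self.multi_modal_inputs = multi_modal_inputs


inter_data_list = [
    FakeInterData({"pixel_values": [1.0, 2.0]}),
    FakeInterData(None),  # text-only group: filtered out
    FakeInterData({"pixel_values": [3.0, 4.0]}),
]

# Same shape as the patched comprehension in build(): one entry per
# sequence group that has multi-modal data.
multi_modal_inputs_list = [
    data.multi_modal_inputs for data in inter_data_list
    if data.multi_modal_inputs is not None
]
assert len(multi_modal_inputs_list) == 2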
