[Bugfix] Properly propagate trust_remote_code settings (vllm-project#10047)

Signed-off-by: Zifei Tong <[email protected]>
zifeitong authored Nov 6, 2024
1 parent 537231a commit 8cdec76
Showing 2 changed files with 16 additions and 13 deletions.
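
Both files apply the same pattern: instead of hardcoding trust_remote_code=True when fetching the cached Hugging Face processor or tokenizer, the helpers now read the flag from the model config, so the value the user actually configured is the one that gets used. A minimal, self-contained sketch of that pattern (the ModelConfig dataclass and get_tokenizer helper below are simplified stand-ins for illustration, not vLLM's real classes):

    from dataclasses import dataclass

    from transformers import AutoTokenizer


    @dataclass
    class ModelConfig:
        # Simplified stand-in for vLLM's model config; only the fields used here.
        tokenizer: str
        trust_remote_code: bool = False


    def get_tokenizer(model_config: ModelConfig):
        # Propagate the user's setting instead of hardcoding trust_remote_code=True.
        return AutoTokenizer.from_pretrained(
            model_config.tokenizer,
            trust_remote_code=model_config.trust_remote_code)


    # Tokenizers that ship custom code load only if the caller opted in.
    tokenizer = get_tokenizer(ModelConfig(tokenizer="gpt2"))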
7 changes: 4 additions & 3 deletions vllm/model_executor/models/chatglm.py
@@ -54,8 +54,9 @@ def mm_input_mapper_for_glmv(
         data: MultiModalData[object],
 ) -> Dict:
     model_config = ctx.model_config
-    tokenizer = cached_get_tokenizer(model_config.tokenizer,
-                                     trust_remote_code=True)
+    tokenizer = cached_get_tokenizer(
+        model_config.tokenizer,
+        trust_remote_code=model_config.trust_remote_code)
     if tokenizer is None:
         raise RuntimeError("No HuggingFace processor is available "
                            "to process the image object")
@@ -525,7 +526,7 @@ def _parse_and_validate_image_input(
         elif isinstance(pixel_values, list):
             return torch.concat(pixel_values)
         else:
-            raise TypeError("""pixel_values must be a torch.Tensor
+            raise TypeError("""pixel_values must be a torch.Tensor
                 or a list of torch.Tensor
                 """)
         return GLMImagePixelInputs(pixel_values=pixel_values)
22 changes: 12 additions & 10 deletions vllm/model_executor/models/molmo.py
@@ -844,9 +844,10 @@ def get_max_tokens(max_crops: int, crop_patches: int, left_margin: int,
 
 
 def get_max_molmo_image_tokens(ctx: InputContext) -> int:
-    processor = cached_get_processor(ctx.model_config.model,
-                                     trust_remote_code=True,
-                                     revision=ctx.model_config.code_revision)
+    processor = cached_get_processor(
+        ctx.model_config.model,
+        trust_remote_code=ctx.model_config.trust_remote_code,
+        revision=ctx.model_config.code_revision)
     image_processor = processor.image_processor
     max_llm_image_tokens = get_max_tokens(
         image_processor.max_crops,
@@ -870,9 +871,10 @@ def image_input_mapper_for_molmo(
 
 
 def dummy_data_for_molmo(ctx: InputContext, seq_len: int,
                          mm_counts: Mapping[str, int]):
-    processor = cached_get_processor(ctx.model_config.model,
-                                     trust_remote_code=True,
-                                     revision=ctx.model_config.code_revision)
+    processor = cached_get_processor(
+        ctx.model_config.model,
+        trust_remote_code=ctx.model_config.trust_remote_code,
+        revision=ctx.model_config.code_revision)
     image_processor = processor.image_processor
 
     base_image_input_d = image_processor.image_patch_size
@@ -935,11 +937,11 @@ def input_processor_for_molmo(ctx: InputContext, inputs: DecoderOnlyInputs):
     multi_modal_data = inputs.get("multi_modal_data")
     image = None if multi_modal_data is None else multi_modal_data.get("image")
 
-    processor = cached_get_processor(ctx.model_config.model,
-                                     trust_remote_code=True,
-                                     revision=ctx.model_config.code_revision)
-
     model_config = ctx.model_config
+    processor = cached_get_processor(
+        ctx.model_config.model,
+        trust_remote_code=model_config.trust_remote_code,
+        revision=ctx.model_config.code_revision)
     tokenizer = cached_get_tokenizer(
         model_config.tokenizer,
         trust_remote_code=model_config.trust_remote_code)
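
From the user's side, the flag that matters is the one passed at engine construction. A usage sketch, assuming the standard vllm.LLM entry point (the model name below is illustrative): with this commit, the setting given here is what reaches cached_get_processor and cached_get_tokenizer in chatglm.py and molmo.py, rather than an unconditional True.

    from vllm import LLM

    # trust_remote_code now flows through to the multimodal processor and
    # tokenizer lookups instead of being hardcoded in the model code.
    llm = LLM(model="allenai/Molmo-7B-D-0924", trust_remote_code=True)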
