[Bugfix] Fix _init_vision_model in NVLM_D model (vllm-project#9611)
Co-authored-by: Isotr0py <[email protected]>
Signed-off-by: Erkin Sagiroglu <[email protected]>
2 people authored and Erkin Sagiroglu committed Oct 26, 2024
1 parent 6a4ea6d commit 15e022c
Showing 1 changed file with 28 additions and 9 deletions.
vllm/model_executor/models/nvlm_d.py
@@ -58,12 +58,31 @@ def _init_mlp1(self, config: PretrainedConfig) -> nn.Sequential:
             nn.Linear(llm_intermediate_size, llm_hidden_size, bias=False),
         )
 
-    def _init_vision_model(self, config: PretrainedConfig,
-                           quant_config: Optional[QuantizationConfig],
-                           num_hidden_layers: int):
-        # We added additional dummy heads to the original num of heads to make
-        # the number of heads divisible by 8.
-        return InternVisionModel(config.vision_config,
-                                 quant_config=quant_config,
-                                 num_hidden_layers_override=num_hidden_layers,
-                                 num_dummy_heads=7)
+    def _init_vision_model(
+        self,
+        config: PretrainedConfig,
+        quant_config: Optional[QuantizationConfig],
+        *,
+        is_mono: bool,
+        prefix: str,
+    ):
+        if not is_mono:
+            vision_feature_layer = config.select_layer
+            if vision_feature_layer < 0:
+                num_hidden_layers = config.vision_config.num_hidden_layers \
+                    + vision_feature_layer + 1
+            else:
+                num_hidden_layers = vision_feature_layer + 1
+
+            # We added additional dummy heads to the original num of heads to
+            # make the number of heads divisible by 8.
+            return InternVisionModel(
+                config.vision_config,
+                quant_config=quant_config,
+                num_hidden_layers_override=num_hidden_layers,
+                num_dummy_heads=7,
+                prefix=prefix,
+            )
+        else:
+            msg = "Monolith mode is not applicable to NVLM_D"
+            raise NotImplementedError(msg)
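For context, a minimal standalone sketch (not part of this commit) of the select_layer arithmetic the new override performs. The helper name layers_to_keep and the concrete config values (45 encoder layers and 25 attention heads, as in InternViT-6B) are assumptions for illustration only:

def layers_to_keep(select_layer: int, total_layers: int) -> int:
    """Number of encoder layers to instantiate for a given feature layer."""
    if select_layer < 0:
        # Negative values index back from the end: -1 keeps every layer.
        return total_layers + select_layer + 1
    # Non-negative values are 0-based absolute positions.
    return select_layer + 1

# Illustrative checks (assuming a 45-layer encoder):
assert layers_to_keep(-1, 45) == 45  # final hidden state -> build all layers
assert layers_to_keep(-4, 45) == 42  # skip the last three layers
assert layers_to_keep(23, 45) == 24  # build layers 0..23

# Dummy-head padding: assuming 25 attention heads, 8-way tensor parallelism
# cannot split the heads evenly; 7 dummy heads pad them to 25 + 7 = 32.
assert (25 + 7) % 8 == 0

The negative-index convention mirrors the Hugging Face vision_feature_layer style: -1 selects the final encoder layer, so every layer must be built, while a non-negative index k is 0-based and needs k + 1 layers. Truncating the encoder to num_hidden_layers_override avoids building layers whose outputs would be discarded.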
