Commit: move_modules_to_save_to_protocol

s.m.kochetkov committed Nov 4, 2024
Parent: f3a735d · Commit: 0860f9c

Showing 2 changed files with 1 addition and 3 deletions.
vllm/model_executor/models/interfaces.py (1 addition, 1 deletion)

@@ -78,7 +78,7 @@ class SupportsLoRA(Protocol):
     supported_lora_modules: ClassVar[List[str]]
     embedding_modules: ClassVar[Dict[str, str]]
     embedding_padding_modules: ClassVar[List[str]]
-    modules_to_save: ClassVar[List[str]]
+    modules_to_save: ClassVar[List[str]] = ["lm_head", "embed_tokens"]

     # lora_config is None when LoRA is not enabled
     def __init__(self, *, lora_config: Optional["LoRAConfig"] = None) -> None:
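With the default now defined on the protocol, any class that explicitly inherits from SupportsLoRA picks up modules_to_save through the normal MRO instead of redeclaring it. A minimal runnable sketch of that mechanism (simplified names, hypothetical TinyModel; not the full vLLM interface):

from typing import ClassVar, List, Protocol


class SupportsLoRA(Protocol):
    # Class-level default defined on the Protocol itself.
    modules_to_save: ClassVar[List[str]] = ["lm_head", "embed_tokens"]


class TinyModel(SupportsLoRA):
    # No modules_to_save here: the value is inherited from the
    # explicit SupportsLoRA base class.
    pass


print(TinyModel.modules_to_save)  # ['lm_head', 'embed_tokens']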
vllm/model_executor/models/llama.py (0 additions, 2 deletions)

@@ -495,8 +495,6 @@ class LlamaForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
         "norm": "model.norm"
     }

-    modules_to_save = ["lm_head", "embed_tokens"]
-
     def __init__(
         self,
         config: LlamaConfig,
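This is why the llama.py deletion is safe: LlamaForCausalLM lists SupportsLoRA as an explicit base, so it keeps resolving modules_to_save to ["lm_head", "embed_tokens"] by inheritance. Continuing the sketch above, a model that needs a different set can still shadow the default with its own class attribute (hypothetical CustomModel, not part of this commit):

class CustomModel(SupportsLoRA):
    # A local class attribute shadows the Protocol-level default.
    modules_to_save: ClassVar[List[str]] = ["lm_head"]


print(CustomModel.modules_to_save)  # ['lm_head']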
