[Interleaved ATTN] Support for Mistral-8B (vllm-project#10591)
Signed-off-by: youkaichao <[email protected]>
Co-authored-by: youkaichao <[email protected]>
Signed-off-by: cedonley <[email protected]>
2 people authored and cedonley committed Dec 7, 2024
1 parent f5c1aae commit 0e98317
Showing 1 changed file with 15 additions and 1 deletion: vllm/model_executor/models/llama.py
@@ -54,7 +54,7 @@
 
 from .interfaces import SupportsLoRA, SupportsPP
 from .utils import (AutoWeightsLoader, PPMissingLayer, WeightsMapper,
-                    is_pp_missing_parameter,
+                    extract_layer_index, is_pp_missing_parameter,
                     make_empty_intermediate_tensors_factory, make_layers,
                     maybe_prefix)

@@ -114,6 +114,7 @@ def __init__(
         prefix: str = "",
     ) -> None:
         super().__init__()
+        layer_idx = extract_layer_index(prefix)
         self.hidden_size = hidden_size
         tp_size = get_tensor_model_parallel_world_size()
         self.total_num_heads = num_heads
@@ -168,13 +169,26 @@ def __init__(
             rope_scaling=rope_scaling,
             is_neox_style=is_neox_style,
         )
+
+        if hasattr(config, "interleaved_sliding_window"):
+            if isinstance(config.interleaved_sliding_window, int):
+                sliding_window = config.interleaved_sliding_window
+            elif isinstance(config.interleaved_sliding_window, list):
+                sw_idx = layer_idx % len(config.interleaved_sliding_window)
+                sliding_window = config.interleaved_sliding_window[sw_idx]
+            else:
+                raise ValueError(f"{type(config.interleaved_sliding_window)} is not supported.")
+        else:
+            sliding_window = None
+
         self.attn = Attention(
             self.num_heads,
             self.head_dim,
             self.scaling,
             num_kv_heads=self.num_kv_heads,
             cache_config=cache_config,
             quant_config=quant_config,
+            per_layer_sliding_window=sliding_window,
             prefix=f"{prefix}.attn",
         )

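The change selects a sliding-window size for each attention layer: an int in config.interleaved_sliding_window applies the same window to every layer, while a list is indexed by layer_idx modulo its length, so consecutive layers can use different window sizes. The selected value is then handed to Attention through the new per_layer_sliding_window argument. Below is a minimal standalone sketch of that selection logic; pick_sliding_window and the toy extract_layer_index are hypothetical helpers written for this example (the diff imports the real extract_layer_index from vllm/model_executor/models/utils.py), and the assumption that a None entry means full attention is not spelled out in the diff.

from typing import List, Optional, Union

def extract_layer_index(prefix: str) -> int:
    # Assumed behavior: pull the numeric layer index out of a module
    # prefix such as "model.layers.3.self_attn".
    for part in prefix.split("."):
        if part.isdigit():
            return int(part)
    raise ValueError(f"no layer index found in prefix {prefix!r}")

def pick_sliding_window(
    interleaved_sliding_window: Union[int, List[Optional[int]], None],
    prefix: str,
) -> Optional[int]:
    # Mirrors the selection added in this commit:
    #   int  -> same window for every layer
    #   list -> cycle through the entries by layer index
    #   None -> no sliding window configured
    if interleaved_sliding_window is None:
        return None
    if isinstance(interleaved_sliding_window, int):
        return interleaved_sliding_window
    if isinstance(interleaved_sliding_window, list):
        layer_idx = extract_layer_index(prefix)
        sw_idx = layer_idx % len(interleaved_sliding_window)
        return interleaved_sliding_window[sw_idx]
    raise ValueError(f"{type(interleaved_sliding_window)} is not supported.")

# Example: a hypothetical two-entry pattern that alternates between a
# 4096-token window and full attention (None assumed to mean "no window").
if __name__ == "__main__":
    pattern = [4096, None]
    for i in range(4):
        print(i, pick_sliding_window(pattern, f"model.layers.{i}.self_attn"))
    # 0 4096
    # 1 None
    # 2 4096
    # 3 None

Keying the selection off the module prefix avoids extra plumbing: each LlamaAttention instance can work out its own layer index from the prefix it is constructed with.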
