[Misc] Allow passing logits_soft_cap for xformers backend (vllm-project#11252)

Signed-off-by: Isotr0py <[email protected]>
Isotr0py authored Dec 17, 2024
1 parent 02222a0 commit f9ecbb1
Showing 1 changed file with 3 additions and 5 deletions.
8 changes: 3 additions & 5 deletions vllm/attention/backends/xformers.py
@@ -17,9 +17,7 @@
     is_all_cross_attn_metadata_set, is_all_encoder_attn_metadata_set)
 from vllm.attention.ops.paged_attn import (PagedAttention,
                                            PagedAttentionMetadata)
-from vllm.logger import init_logger
-
-logger = init_logger(__name__)
+from vllm.utils import print_warning_once


 class XFormersBackend(AttentionBackend):
@@ -386,8 +384,8 @@ def __init__(
             raise ValueError(
                 "XFormers does not support block-sparse attention.")
         if logits_soft_cap is not None:
-            raise ValueError(
-                "XFormers does not support attention logits soft capping.")
+            print_warning_once("XFormers does not support logits soft cap. "
+                               "Outputs may be slightly off.")
         self.num_heads = num_heads
         self.head_size = head_size
         self.scale = float(scale)
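
For context: attention logits soft capping (used by models such as Gemma 2) bounds the pre-softmax attention scores with a tanh, i.e. scores = cap * tanh(scores / cap). The xFormers memory-efficient attention kernel does not apply this transform, which is why the backend now only warns that outputs may be slightly off instead of rejecting the argument. Below is a minimal sketch of the reference behaviour in PyTorch; the helper name is hypothetical and is not part of this diff or of vLLM:

import torch

def soft_cap_scores(scores: torch.Tensor,
                    logits_soft_cap: float | None) -> torch.Tensor:
    # Hypothetical illustration, not vLLM code: tanh-based logits soft
    # capping as used by models such as Gemma 2. Bounds the scores to
    # (-logits_soft_cap, logits_soft_cap) while staying smooth near zero.
    if logits_soft_cap is None:
        return scores
    return logits_soft_cap * torch.tanh(scores / logits_soft_cap)

When the cap is skipped (as in the xFormers backend after this change), large attention scores are no longer bounded, so models trained with soft capping can produce slightly different outputs, matching the new warning.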
