From f21ff9ae58176369c075050f676701d3dbf66609 Mon Sep 17 00:00:00 2001
From: mzusman
Date: Sun, 8 Dec 2024 18:30:23 +0200
Subject: [PATCH] Revert Mamba MS fix, will fix it in future PR

Signed-off-by: mzusman
---
 vllm/attention/backends/placeholder_attn.py | 4 ----
 vllm/worker/multi_step_model_runner.py      | 4 +---
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/vllm/attention/backends/placeholder_attn.py b/vllm/attention/backends/placeholder_attn.py
index b444349d152a4..888adbffb8578 100644
--- a/vllm/attention/backends/placeholder_attn.py
+++ b/vllm/attention/backends/placeholder_attn.py
@@ -114,10 +114,6 @@ class PlaceholderAttentionMetadata(AttentionMetadata):
     _cached_prefill_metadata: Optional["PlaceholderAttentionMetadata"] = None
     _cached_decode_metadata: Optional["PlaceholderAttentionMetadata"] = None
 
-    def advance_step(self, *args, **kwargs):
-        # No need to do anything here
-        pass
-
     @property
     def prefill_metadata(self) -> Optional["PlaceholderAttentionMetadata"]:
         if self.num_prefills == 0:

diff --git a/vllm/worker/multi_step_model_runner.py b/vllm/worker/multi_step_model_runner.py
index e08a61e31fe42..3ca0d88a42183 100644
--- a/vllm/worker/multi_step_model_runner.py
+++ b/vllm/worker/multi_step_model_runner.py
@@ -29,9 +29,7 @@
 
 logger = init_logger(__name__)
 
-MULTI_STEP_ATTENTION_BACKENDS = [
-    "FLASH_ATTN", "ROCM_FLASH", "FLASHINFER", "NO_ATTENTION"
-]
+MULTI_STEP_ATTENTION_BACKENDS = ["FLASH_ATTN", "ROCM_FLASH", "FLASHINFER"]
 MULTI_STEP_CHUNKED_PREFILL_ATTENTION_BACKENDS = ["FLASH_ATTN"]
 
 def _get_supported_attention_backends(chunked_prefill_enabled: bool) \
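
For reference, a minimal sketch of how the two allow-lists above are presumably
consulted by _get_supported_attention_backends, whose signature appears as the
truncated context line at the end of the second hunk. The function body and the
comments are assumptions inferred from the names and signature, not the verbatim
vLLM implementation:

    from typing import List

    # Allow-lists as they stand after this revert (no "NO_ATTENTION" entry).
    MULTI_STEP_ATTENTION_BACKENDS = ["FLASH_ATTN", "ROCM_FLASH", "FLASHINFER"]
    MULTI_STEP_CHUNKED_PREFILL_ATTENTION_BACKENDS = ["FLASH_ATTN"]

    def _get_supported_attention_backends(chunked_prefill_enabled: bool) \
            -> List[str]:
        # Assumed body: chunked prefill narrows the set of attention backends
        # that are allowed to run with the multi-step model runner.
        if chunked_prefill_enabled:
            return MULTI_STEP_CHUNKED_PREFILL_ATTENTION_BACKENDS
        return MULTI_STEP_ATTENTION_BACKENDS

With "NO_ATTENTION" dropped from the list and advance_step removed from
PlaceholderAttentionMetadata, the placeholder backend (which the commit subject
ties to Mamba) is no longer advertised as multi-step capable until the
follow-up PR.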