
Commit 8e16aca
Update to use kernels from vllm-project#7651
1 parent 06b146e

1 file changed, 5 insertions(+), 4 deletions(-)


vllm/model_executor/models/mamba.py

@@ -4,9 +4,6 @@
 from typing import Iterable, List, Optional, Tuple
 
 import torch
-from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
-from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
-from mamba_ssm.ops.triton.selective_state_update import selective_state_update
 from torch import nn
 from torch.nn.parameter import Parameter
 from transformers import MambaConfig
@@ -21,6 +18,10 @@
                                                MergedColumnParallelLinear,
                                                RowParallelLinear)
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
+from vllm.model_executor.layers.mamba.ops.causal_conv1d import (
+    causal_conv1d_fn, causal_conv1d_update)
+from vllm.model_executor.layers.mamba.ops.mamba_ssm import (
+    selective_scan_fn, selective_state_update)
 from vllm.model_executor.layers.quantization.base_config import (
     QuantizationConfig)
 from vllm.model_executor.layers.sampler import Sampler
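
This hunk drops the hard dependency on the external causal-conv1d and mamba-ssm packages in favor of the kernels vendored into vLLM by vllm-project#7651. A quick sanity check (illustrative only, not part of the commit) that the in-tree modules expose the same four callables the external packages provided:

    from vllm.model_executor.layers.mamba.ops.causal_conv1d import (
        causal_conv1d_fn, causal_conv1d_update)
    from vllm.model_executor.layers.mamba.ops.mamba_ssm import (
        selective_scan_fn, selective_state_update)

    # Each import above replaces its external-package counterpart one-for-one.
    for fn in (causal_conv1d_fn, causal_conv1d_update,
               selective_scan_fn, selective_state_update):
        assert callable(fn), fn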
@@ -157,7 +158,7 @@ def mamba_forward(self,
                 (self.conv_kernel_size - hidden_states.shape[-1], 0))
             cache_params.conv_state.copy_(conv_states)
 
-        hidden_states = causal_conv1d_fn(
+        hidden_states, _ = causal_conv1d_fn(
             hidden_states,
             conv_weights,
             self.conv1d.bias,
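
The one functional change in this hunk: the in-tree causal_conv1d_fn returns a tuple rather than a bare tensor, so the call site now unpacks it and discards the second element. A minimal sketch of the calling convention the diff implies (the wrapper name run_conv is a hypothetical helper for illustration, not part of the commit):

    import torch

    from vllm.model_executor.layers.mamba.ops.causal_conv1d import (
        causal_conv1d_fn)

    def run_conv(hidden_states: torch.Tensor,
                 conv_weights: torch.Tensor,
                 conv_bias: torch.Tensor) -> torch.Tensor:
        # The in-tree kernel returns a tuple whose first element holds the
        # convolved activations; the second is discarded here, as in the
        # commit. The external causal_conv1d package returned the tensor
        # directly, hence the one-line call-site change above.
        out, _ = causal_conv1d_fn(hidden_states, conv_weights, conv_bias)
        return out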
