
Commit

Update flashinfer.py
noamgat authored Jul 18, 2024
1 parent 1d7959c commit 8ca76b2
Showing 1 changed file with 2 additions and 3 deletions.
vllm/attention/backends/flashinfer.py (5 changes: 2 additions & 3 deletions)
@@ -13,15 +13,14 @@
 import torch
 
 from vllm import _custom_ops as ops
-from vllm.attention.backends.abstract import (AttentionBackend,
-                                              AttentionImpl,
+from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                               AttentionMetadata,
                                               AttentionMetadataBuilder,
                                               AttentionType)
-from vllm.attention.ops.paged_attn import PagedAttention
 from vllm.attention.backends.utils import (PAD_SLOT_ID, compute_slot_mapping,
                                            compute_slot_mapping_start_idx,
                                            is_block_tables_empty)
+from vllm.attention.ops.paged_attn import PagedAttention
 from vllm.sequence import SequenceGroupMetadata
 from vllm.utils import get_kv_cache_torch_dtype, make_tensor_with_pad
 
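For context beyond what the commit page states: the diff collapses the multi-line `AttentionBackend`/`AttentionImpl` import onto one line and moves the `PagedAttention` import below the `vllm.attention.backends.utils` import, which matches alphabetical module ordering. A minimal sketch, assuming the intent was to satisfy an import-order lint such as isort (an assumption, since the commit message does not say), of checking the touched file locally; the file path is taken from the diff above:

```python
# Hedged sketch: assumes isort is installed; vLLM's actual lint configuration may differ.
# --check-only exits non-zero if the file's imports are not in sorted order,
# and --diff prints the reordering isort would apply instead of rewriting the file.
import subprocess

result = subprocess.run(
    ["isort", "--check-only", "--diff",
     "vllm/attention/backends/flashinfer.py"],
    capture_output=True,
    text=True,
)
print(result.stdout or "imports already sorted")
```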
