Deprecating sync_openai
gshtras committed Dec 16, 2024
1 parent 1a8e549 commit 78440dc
Showing 4 changed files with 1 addition and 545 deletions.
2 changes: 0 additions & 2 deletions vllm/__init__.py
@@ -3,7 +3,6 @@
 from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
 from vllm.engine.async_llm_engine import AsyncLLMEngine
 from vllm.engine.llm_engine import LLMEngine
-from vllm.entrypoints.fast_sync_llm import FastSyncLLM
 from vllm.entrypoints.llm import LLM
 from vllm.executor.ray_utils import initialize_ray_cluster
 from vllm.inputs import PromptType, TextPrompt, TokensPrompt
@@ -22,7 +21,6 @@
     "__version__",
     "__version_tuple__",
     "LLM",
-    "FastSyncLLM",
     "ModelRegistry",
     "PromptType",
     "TextPrompt",
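
With FastSyncLLM no longer exported from vllm, downstream code that imported it needs a different entrypoint. Below is a minimal sketch of the offline path through the LLM class, which vllm continues to export; the model name and sampling settings are placeholder assumptions, not taken from this commit.

    # Sketch: use the standard LLM entrypoint in place of the removed
    # FastSyncLLM export. Model name and sampling values are placeholders.
    from vllm import LLM, SamplingParams

    llm = LLM(model="facebook/opt-125m")
    params = SamplingParams(temperature=0.0, max_tokens=32)

    # generate() runs synchronously and returns a list of RequestOutput objects.
    outputs = llm.generate(["Hello, my name is"], params)
    for out in outputs:
        print(out.outputs[0].text)
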
2 changes: 1 addition & 1 deletion vllm/config.py
@@ -28,7 +28,7 @@
                                             get_hf_text_config, get_pooling_config,
                                             get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope)
 from vllm.utils import (GiB_bytes, LayerBlockType, cuda_device_count_stateless,
-                        is_mi250, is_navi, get_cpu_memory, print_warning_once,
+                        get_cpu_memory, is_mi250, is_navi, print_warning_once,
                         random_uuid, resolve_obj_by_qualname)
 
 if TYPE_CHECKING:
127 changes: 0 additions & 127 deletions vllm/entrypoints/fast_sync_llm.py

This file was deleted.
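
With the synchronous sync_openai entrypoint deprecated, serving presumably goes through the standard OpenAI-compatible server instead (for example, one started with vllm serve followed by a model name). Below is a minimal client sketch against such a server; the model name, port, and API key value are assumptions for illustration.

    # Sketch: query a vLLM OpenAI-compatible server via the openai client.
    # Assumes a server is already running on localhost:8000, e.g. started
    # with: vllm serve facebook/opt-125m
    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
    completion = client.completions.create(
        model="facebook/opt-125m",
        prompt="San Francisco is a",
        max_tokens=32,
    )
    print(completion.choices[0].text)
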
