Skip to content

Commit

Permalink
deps: bump vllm minimum version to 0.6.2
Browse files Browse the repository at this point in the history
  • Loading branch information
dtrifiro committed Sep 27, 2024
1 parent 087f760 commit 0cec41a
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 21 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ jobs:
pyv: ["3.12"]
vllm_version:
# - "" # skip the pypi version as it will not work on CPU
- "git+https://github.com/vllm-project/vllm@v0.6.1.post2"
- "git+https://github.com/vllm-project/vllm@v0.6.2"
- "git+https://github.com/vllm-project/vllm@main"
- "git+https://github.com/opendatahub-io/vllm@main"

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ classifiers = [
requires-python = ">=3.9"
dynamic = ["version"]
dependencies = [
"vllm>=0.5.5",
"vllm>=0.6.2",
"prometheus_client==0.20.0",
"grpcio==1.62.2",
"grpcio-health-checking==1.62.2",
Expand Down
24 changes: 5 additions & 19 deletions src/vllm_tgis_adapter/http.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,9 @@
from typing import TYPE_CHECKING

from vllm.entrypoints.launcher import serve_http
from vllm.entrypoints.openai.api_server import build_app
from vllm.entrypoints.openai.api_server import build_app, init_app_state
from vllm.logger import init_logger

try:
from vllm.entrypoints.openai.api_server import init_app
except ImportError: # vllm > 0.6.1.post2
from vllm.entrypoints.openai.api_server import init_app_state


if TYPE_CHECKING:
import argparse

Expand All @@ -31,12 +25,9 @@ async def run_http_server(
# modified copy of vllm.entrypoints.openai.api_server.run_server that
# allows passing of the engine

try:
app = await init_app(engine, args) # type: ignore[arg-type]
except NameError: # vllm > 0.6.1.post2
app = build_app(args)
model_config = await engine.get_model_config()
init_app_state(engine, model_config, app.state, args)
app = build_app(args)
model_config = await engine.get_model_config()
init_app_state(engine, model_config, app.state, args)

serve_kwargs = {
"host": args.host,
Expand All @@ -50,12 +41,7 @@ async def run_http_server(
}
serve_kwargs.update(uvicorn_kwargs)

try:
shutdown_coro = await serve_http(app, engine, **serve_kwargs)
except TypeError:
# vllm 0.5.4 backwards compatibility
# HTTP server will not shut itself down when the engine dies
shutdown_coro = await serve_http(app, **serve_kwargs)
shutdown_coro = await serve_http(app, **serve_kwargs)

# launcher.serve_http returns a shutdown coroutine to await
# (The double await is intentional)
Expand Down

0 comments on commit 0cec41a

Please sign in to comment.