Support chat models in `dstack-proxy` (#1953)
This commit adds an OpenAI-compatible endpoint to `dstack-proxy`, which effectively allows running services with model mappings without a gateway. Most of the OpenAI- and TGI-specific code is copied from `dstack-gateway`; this duplication will be eliminated later, once `dstack-proxy` supports running on gateways. The commit also includes some refactoring in `dstack-proxy`: it introduces the `ProxyError` and `UnexpectedProxyError` exceptions and simplifies error logging in `service_proxy.py`.
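Since the endpoint speaks the OpenAI API, a mapped model can be queried with the stock OpenAI SDK. A minimal sketch, assuming the router below is mounted under a `/proxy/models/` prefix, the server runs on `localhost:3000`, and the project and model names shown exist (all assumptions, not part of this diff):

```python
# Hedged sketch: base_url prefix, port, project, and model names are assumptions.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:3000/proxy/models/main",  # hypothetical proxy URL + project name
    api_key="<dstack token>",
)
resp = client.chat.completions.create(
    model="llama-3-8b",  # hypothetical model mapping name
    messages=[{"role": "user", "content": "Hello!"}],
)
print(resp.choices[0].message.content)
```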
Showing 21 changed files with 930 additions and 147 deletions.
The new exceptions module, `dstack._internal.proxy.errors` (new file, 14 lines):

```python
from fastapi import HTTPException, status


class ProxyError(HTTPException):
    """Errors in dstack-proxy that are caused by and should be reported to the user"""

    def __init__(self, detail: str, code: int = status.HTTP_400_BAD_REQUEST) -> None:
        super().__init__(detail=detail, status_code=code)


class UnexpectedProxyError(RuntimeError):
    """Internal errors in dstack-proxy that should have never happened"""

    pass
```
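Because `ProxyError` subclasses FastAPI's `HTTPException`, raising it from a route produces an ordinary JSON error response with the chosen status code. A quick sketch (the route itself is illustrative, not from this commit):

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient

from dstack._internal.proxy.errors import ProxyError

app = FastAPI()


@app.get("/boom")
async def boom():
    # Illustrative only: any route raising ProxyError gets FastAPI's
    # standard {"detail": ...} error body with the given status code.
    raise ProxyError("model not found", code=404)


resp = TestClient(app).get("/boom")
print(resp.status_code, resp.json())  # 404 {'detail': 'model not found'}
```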
The new OpenAI-compatible API router (new file, 67 lines), exposing `GET /{project_name}/models` and `POST /{project_name}/chat/completions`:

```python
from typing import AsyncIterator

from fastapi import APIRouter, Depends, status
from fastapi.responses import StreamingResponse
from typing_extensions import Annotated

from dstack._internal.proxy.deps import ProxyAuth, get_proxy_repo
from dstack._internal.proxy.errors import ProxyError, UnexpectedProxyError
from dstack._internal.proxy.repos.base import BaseProxyRepo
from dstack._internal.proxy.schemas.model_proxy import (
    ChatCompletionsChunk,
    ChatCompletionsRequest,
    ChatCompletionsResponse,
    Model,
    ModelsResponse,
)
from dstack._internal.proxy.services.model_proxy import get_chat_client
from dstack._internal.proxy.services.service_connection import get_service_replica_client

router = APIRouter(dependencies=[Depends(ProxyAuth(auto_enforce=True))])


@router.get("/{project_name}/models")
async def get_models(
    project_name: str, repo: Annotated[BaseProxyRepo, Depends(get_proxy_repo)]
) -> ModelsResponse:
    models = await repo.list_models(project_name)
    data = [
        Model(id=m.name, created=int(m.created_at.timestamp()), owned_by=project_name)
        for m in models
    ]
    return ModelsResponse(data=data)


@router.post("/{project_name}/chat/completions", response_model=ChatCompletionsResponse)
async def post_chat_completions(
    project_name: str,
    body: ChatCompletionsRequest,
    repo: Annotated[BaseProxyRepo, Depends(get_proxy_repo)],
):
    model = await repo.get_model(project_name, body.model)
    if model is None:
        raise ProxyError(
            f"Model {body.model} not found in project {project_name}", status.HTTP_404_NOT_FOUND
        )
    service = await repo.get_service(project_name, model.run_name)
    if service is None or not service.replicas:
        # A model must always point at an existing run with replicas;
        # anything else is an internal inconsistency, not a user error.
        raise UnexpectedProxyError(
            f"Model {model.name} in project {project_name} references run {model.run_name}"
            " that does not exist or has no replicas"
        )
    http_client = await get_service_replica_client(project_name, service, repo)
    client = get_chat_client(model, http_client)
    if not body.stream:
        return await client.generate(body)
    else:
        return StreamingResponse(
            stream_chunks(client.stream(body)),
            media_type="text/event-stream",
            # Tell nginx-style proxies not to buffer the event stream.
            headers={"X-Accel-Buffering": "no"},
        )


async def stream_chunks(chunks: AsyncIterator[ChatCompletionsChunk]) -> AsyncIterator[bytes]:
    # Re-serialize chunks as OpenAI-style server-sent events.
    async for chunk in chunks:
        yield f"data:{chunk.json()}\n\n".encode()
    yield "data: [DONE]\n\n".encode()
```
The new schemas module, `dstack._internal.proxy.schemas.model_proxy` (new file, 79 lines), mirroring the OpenAI chat-completions API:

```python
from typing import Any, Dict, List, Literal, Optional, Union

from dstack._internal.core.models.common import CoreModel

FinishReason = Literal["stop", "length", "tool_calls", "eos_token"]


class ChatMessage(CoreModel):
    role: str  # TODO(egor-s) types
    content: str


class ChatCompletionsRequest(CoreModel):
    messages: List[ChatMessage]
    model: str
    frequency_penalty: Optional[float] = 0.0
    logit_bias: Dict[str, float] = {}
    max_tokens: Optional[int] = None
    n: int = 1
    presence_penalty: float = 0.0
    response_format: Optional[Dict] = None
    seed: Optional[int] = None
    stop: Optional[Union[str, List[str]]] = None
    stream: bool = False
    temperature: Optional[float] = 1.0
    top_p: Optional[float] = 1.0
    tools: List[Any] = []
    tool_choice: Union[Literal["none", "auto"], Dict] = {}
    user: Optional[str] = None


class ChatCompletionsChoice(CoreModel):
    finish_reason: FinishReason
    index: int
    message: ChatMessage


class ChatCompletionsChunkChoice(CoreModel):
    delta: object
    logprobs: object = {}
    finish_reason: Optional[FinishReason]
    index: int


class ChatCompletionsUsage(CoreModel):
    completion_tokens: int
    prompt_tokens: int
    total_tokens: int


class ChatCompletionsResponse(CoreModel):
    id: str
    choices: List[ChatCompletionsChoice]
    created: int
    model: str
    system_fingerprint: str = ""
    object: Literal["chat.completion"] = "chat.completion"
    usage: ChatCompletionsUsage


class ChatCompletionsChunk(CoreModel):
    id: str
    choices: List[ChatCompletionsChunkChoice]
    created: int
    model: str
    system_fingerprint: str = ""
    object: Literal["chat.completion.chunk"] = "chat.completion.chunk"


class Model(CoreModel):
    object: Literal["model"] = "model"
    id: str
    created: int
    owned_by: str


class ModelsResponse(CoreModel):
    object: Literal["list"] = "list"
    data: List[Model]
```
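A small sketch of how these schemas behave (assuming the dstack package is importable): only `messages` and `model` are required, and `dict(exclude_unset=True)`, as used by the clients below, forwards just the fields the caller actually sent:

```python
from dstack._internal.proxy.schemas.model_proxy import ChatCompletionsRequest

req = ChatCompletionsRequest.parse_obj(
    {
        "model": "llama-3-8b",  # hypothetical model name
        "messages": [{"role": "user", "content": "Hello!"}],
    }
)
# Unset fields take OpenAI-style defaults...
assert req.stream is False and req.n == 1
# ...but are dropped when forwarding the request upstream.
print(req.dict(exclude_unset=True))  # only "model" and "messages"
```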
`src/dstack/_internal/proxy/services/model_proxy/__init__.py` (23 additions):
```python
import httpx

from dstack._internal.proxy.errors import UnexpectedProxyError
from dstack._internal.proxy.repos.base import ChatModel
from dstack._internal.proxy.services.model_proxy.clients import ChatCompletionsClient
from dstack._internal.proxy.services.model_proxy.clients.openai import OpenAIChatCompletions
from dstack._internal.proxy.services.model_proxy.clients.tgi import TGIChatCompletions


def get_chat_client(model: ChatModel, http_client: httpx.AsyncClient) -> ChatCompletionsClient:
    # Pick a client based on the API format the model's mapping declares.
    if model.format_spec.format == "tgi":
        return TGIChatCompletions(
            http_client=http_client,
            chat_template=model.format_spec.chat_template,
            eos_token=model.format_spec.eos_token,
        )
    elif model.format_spec.format == "openai":
        return OpenAIChatCompletions(
            http_client=http_client,
            prefix=model.format_spec.prefix,
        )
    else:
        raise UnexpectedProxyError(f"Unsupported model format {model.format_spec.format}")
```
`src/dstack/_internal/proxy/services/model_proxy/clients/__init__.py` (18 additions):
```python
from abc import ABC, abstractmethod
from typing import AsyncIterator

from dstack._internal.proxy.schemas.model_proxy import (
    ChatCompletionsChunk,
    ChatCompletionsRequest,
    ChatCompletionsResponse,
)


class ChatCompletionsClient(ABC):
    @abstractmethod
    async def generate(self, request: ChatCompletionsRequest) -> ChatCompletionsResponse:
        pass

    @abstractmethod
    async def stream(self, request: ChatCompletionsRequest) -> AsyncIterator[ChatCompletionsChunk]:
        # The bare yield marks this abstract method as an async generator,
        # matching the shape concrete implementations must have.
        yield
```
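For illustration, a minimal in-memory implementation of this interface, e.g. as a test double; the `EchoChatCompletions` name is hypothetical, and note that `stream` must itself be an async generator:

```python
import time
from typing import AsyncIterator

from dstack._internal.proxy.schemas.model_proxy import (
    ChatCompletionsChoice,
    ChatCompletionsChunk,
    ChatCompletionsChunkChoice,
    ChatCompletionsRequest,
    ChatCompletionsResponse,
    ChatCompletionsUsage,
    ChatMessage,
)
from dstack._internal.proxy.services.model_proxy.clients import ChatCompletionsClient


class EchoChatCompletions(ChatCompletionsClient):
    """Hypothetical client that echoes the last user message back."""

    async def generate(self, request: ChatCompletionsRequest) -> ChatCompletionsResponse:
        text = request.messages[-1].content
        return ChatCompletionsResponse(
            id="chatcmpl-echo",
            created=int(time.time()),
            model=request.model,
            choices=[
                ChatCompletionsChoice(
                    finish_reason="stop",
                    index=0,
                    message=ChatMessage(role="assistant", content=text),
                )
            ],
            usage=ChatCompletionsUsage(completion_tokens=0, prompt_tokens=0, total_tokens=0),
        )

    async def stream(self, request: ChatCompletionsRequest) -> AsyncIterator[ChatCompletionsChunk]:
        # Deliver the whole echo as a single chunk.
        resp = await self.generate(request)
        yield ChatCompletionsChunk(
            id=resp.id,
            created=resp.created,
            model=resp.model,
            choices=[
                ChatCompletionsChunkChoice(
                    delta={"role": "assistant", "content": resp.choices[0].message.content},
                    finish_reason="stop",
                    index=0,
                )
            ],
        )
```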
`src/dstack/_internal/proxy/services/model_proxy/clients/openai.py` (37 additions):
```python
from typing import AsyncIterator

import httpx

from dstack._internal.proxy.errors import ProxyError
from dstack._internal.proxy.schemas.model_proxy import (
    ChatCompletionsChunk,
    ChatCompletionsRequest,
    ChatCompletionsResponse,
)
from dstack._internal.proxy.services.model_proxy.clients import ChatCompletionsClient


class OpenAIChatCompletions(ChatCompletionsClient):
    def __init__(self, http_client: httpx.AsyncClient, prefix: str):
        self._http = http_client
        self._prefix = prefix

    async def generate(self, request: ChatCompletionsRequest) -> ChatCompletionsResponse:
        resp = await self._http.post(
            f"{self._prefix}/chat/completions", json=request.dict(exclude_unset=True)
        )
        if resp.status_code != 200:
            # Upstream errors are reported back to the user as-is.
            raise ProxyError(resp.text)
        return ChatCompletionsResponse.__response__.parse_raw(resp.content)

    async def stream(self, request: ChatCompletionsRequest) -> AsyncIterator[ChatCompletionsChunk]:
        async with self._http.stream(
            "POST", f"{self._prefix}/chat/completions", json=request.dict(exclude_unset=True)
        ) as resp:
            # Parse the upstream server-sent events back into chunk objects.
            async for line in resp.aiter_lines():
                if not line.startswith("data:"):
                    continue
                data = line[len("data:") :].strip()
                if data == "[DONE]":
                    break
                yield ChatCompletionsChunk.__response__.parse_raw(data)
```
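A hedged sketch of exercising the non-streaming path against a mock upstream via `httpx.MockTransport`; the `prefix="/v1"` value and the canned response payload are assumptions:

```python
import asyncio

import httpx

from dstack._internal.proxy.schemas.model_proxy import ChatCompletionsRequest
from dstack._internal.proxy.services.model_proxy.clients.openai import OpenAIChatCompletions


def handler(request: httpx.Request) -> httpx.Response:
    # Return a minimal OpenAI-style completion regardless of input.
    return httpx.Response(
        200,
        json={
            "id": "cmpl-1",
            "created": 0,
            "model": "test",
            "choices": [
                {
                    "finish_reason": "stop",
                    "index": 0,
                    "message": {"role": "assistant", "content": "Hi!"},
                }
            ],
            "usage": {"completion_tokens": 1, "prompt_tokens": 1, "total_tokens": 2},
        },
    )


async def main() -> None:
    async with httpx.AsyncClient(
        transport=httpx.MockTransport(handler), base_url="http://service"
    ) as http:
        client = OpenAIChatCompletions(http_client=http, prefix="/v1")  # prefix assumed
        req = ChatCompletionsRequest(
            model="test", messages=[{"role": "user", "content": "Hello"}]
        )
        resp = await client.generate(req)
        print(resp.choices[0].message.content)  # -> "Hi!"


asyncio.run(main())
```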