Allow model to be served under multiple names (vllm-project#2894)
Co-authored-by: Alexandre Payot <[email protected]>
hmellor and payoto authored Apr 18, 2024
1 parent 6dc1fc9 commit 66ded03
Showing 5 changed files with 26 additions and 21 deletions.
8 changes: 4 additions & 4 deletions vllm/entrypoints/openai/api_server.py
@@ -150,18 +150,18 @@ async def authentication(request: Request, call_next):
     logger.info(f"args: {args}")
 
     if args.served_model_name is not None:
-        served_model = args.served_model_name
+        served_model_names = args.served_model_name
     else:
-        served_model = args.model
+        served_model_names = [args.model]
     engine_args = AsyncEngineArgs.from_cli_args(args)
     engine = AsyncLLMEngine.from_engine_args(
         engine_args, usage_context=UsageContext.OPENAI_API_SERVER)
-    openai_serving_chat = OpenAIServingChat(engine, served_model,
+    openai_serving_chat = OpenAIServingChat(engine, served_model_names,
                                             args.response_role,
                                             args.lora_modules,
                                             args.chat_template)
     openai_serving_completion = OpenAIServingCompletion(
-        engine, served_model, args.lora_modules)
+        engine, served_model_names, args.lora_modules)
 
     app.root_path = args.root_path
     uvicorn.run(app,
10 changes: 7 additions & 3 deletions vllm/entrypoints/openai/cli_args.py
@@ -54,11 +54,15 @@ def make_arg_parser():
                         help="If provided, the server will require this key "
                         "to be presented in the header.")
     parser.add_argument("--served-model-name",
+                        nargs="+",
                         type=str,
                         default=None,
-                        help="The model name used in the API. If not "
-                        "specified, the model name will be the same as "
-                        "the huggingface name.")
+                        help="The model name(s) used in the API. If multiple "
+                        "names are provided, the server will respond to any "
+                        "of the provided names. The model name in the model "
+                        "field of a response will be the first name in this "
+                        "list. If not specified, the model name will be the "
+                        "same as the `--model` argument.")
     parser.add_argument(
         "--lora-modules",
         type=str,
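
As a rough illustration of the new nargs="+" behaviour: the flag now collects one or more names into a list, and when it is omitted api_server.py falls back to [args.model], as shown in the first hunk. The snippet below is a self-contained sketch; the model names are made up for illustration.

import argparse

# Stand-alone sketch mirroring only the flag definition from the diff;
# the names and values below are illustrative.
parser = argparse.ArgumentParser()
parser.add_argument("--served-model-name", nargs="+", type=str, default=None)

args = parser.parse_args(["--served-model-name", "llama-2-7b", "team-alias"])
print(args.served_model_name)   # ['llama-2-7b', 'team-alias']

args = parser.parse_args([])
print(args.served_model_name)   # None -> api_server.py uses [args.model]
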
8 changes: 4 additions & 4 deletions vllm/entrypoints/openai/serving_chat.py
@@ -24,12 +24,12 @@ class OpenAIServingChat(OpenAIServing):
 
     def __init__(self,
                  engine: AsyncLLMEngine,
-                 served_model: str,
+                 served_model_names: List[str],
                  response_role: str,
                  lora_modules: Optional[List[LoRA]] = None,
                  chat_template=None):
         super().__init__(engine=engine,
-                         served_model=served_model,
+                         served_model_names=served_model_names,
                          lora_modules=lora_modules)
         self.response_role = response_role
         self._load_chat_template(chat_template)
@@ -109,7 +109,7 @@ async def chat_completion_stream_generator(
             result_generator: AsyncIterator[RequestOutput], request_id: str
     ) -> Union[ErrorResponse, AsyncGenerator[str, None]]:
 
-        model_name = request.model
+        model_name = self.served_model_names[0]
         created_time = int(time.time())
         chunk_object_type = "chat.completion.chunk"
         first_iteration = True
@@ -251,7 +251,7 @@ async def chat_completion_full_generator(
             result_generator: AsyncIterator[RequestOutput],
             request_id: str) -> Union[ErrorResponse, ChatCompletionResponse]:
 
-        model_name = request.model
+        model_name = self.served_model_names[0]
         created_time = int(time.time())
         final_res: RequestOutput = None
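
One visible consequence of switching from request.model to self.served_model_names[0]: whichever alias a client sends, the model field of the chat completion response reports the first configured name (the completion endpoint below gets the same treatment). A hedged sketch using the openai Python client, assuming a local vLLM server was started with --served-model-name llama-2-7b team-alias; the URL, key and names are placeholders.

from openai import OpenAI

# Assumes an OpenAI-compatible vLLM server is already running locally;
# base_url, api_key and the model aliases are illustrative.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

resp = client.chat.completions.create(
    model="team-alias",  # any of the served names should be accepted
    messages=[{"role": "user", "content": "Hello"}],
)
print(resp.model)  # expected: "llama-2-7b", the first name in the list
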
6 changes: 3 additions & 3 deletions vllm/entrypoints/openai/serving_completion.py
@@ -53,10 +53,10 @@ class OpenAIServingCompletion(OpenAIServing):
 
     def __init__(self,
                  engine: AsyncLLMEngine,
-                 served_model: str,
+                 served_model_names: List[str],
                  lora_modules: Optional[List[LoRA]] = None):
         super().__init__(engine=engine,
-                         served_model=served_model,
+                         served_model_names=served_model_names,
                          lora_modules=lora_modules)
 
     async def create_completion(self, request: CompletionRequest,
@@ -79,7 +79,7 @@ async def create_completion(self, request: CompletionRequest,
             return self.create_error_response(
                 "suffix is not currently supported")
 
-        model_name = request.model
+        model_name = self.served_model_names[0]
         request_id = f"cmpl-{random_uuid()}"
         created_time = int(time.time())
15 changes: 8 additions & 7 deletions vllm/entrypoints/openai/serving_engine.py
@@ -29,10 +29,10 @@ class OpenAIServing:
 
     def __init__(self,
                  engine: AsyncLLMEngine,
-                 served_model: str,
+                 served_model_names: List[str],
                  lora_modules=Optional[List[LoRA]]):
         self.engine = engine
-        self.served_model = served_model
+        self.served_model_names = served_model_names
         if lora_modules is None:
             self.lora_requests = []
         else:
@@ -74,13 +74,14 @@ async def _post_init(self):
     async def show_available_models(self) -> ModelList:
         """Show available models. Right now we only have one model."""
         model_cards = [
-            ModelCard(id=self.served_model,
-                      root=self.served_model,
+            ModelCard(id=served_model_name,
+                      root=self.served_model_names[0],
                       permission=[ModelPermission()])
+            for served_model_name in self.served_model_names
         ]
         lora_cards = [
             ModelCard(id=lora.lora_name,
-                      root=self.served_model,
+                      root=self.served_model_names[0],
                       permission=[ModelPermission()])
             for lora in self.lora_requests
         ]
@@ -150,7 +151,7 @@ def create_streaming_error_response(
         return json_str
 
     async def _check_model(self, request) -> Optional[ErrorResponse]:
-        if request.model == self.served_model:
+        if request.model in self.served_model_names:
             return
         if request.model in [lora.lora_name for lora in self.lora_requests]:
             return
@@ -160,7 +161,7 @@
             status_code=HTTPStatus.NOT_FOUND)
 
     def _maybe_get_lora(self, request) -> Optional[LoRARequest]:
-        if request.model == self.served_model:
+        if request.model in self.served_model_names:
             return
         for lora in self.lora_requests:
             if request.model == lora.lora_name:
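
With these changes, /v1/models lists one card per served name (each rooted at the first name), and _check_model accepts a request whose model matches any served name or LoRA name. A small sketch of what a client might observe, reusing the illustrative local server from the example above:

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

# Expect one entry per name passed to --served-model-name,
# plus one per configured LoRA module.
for card in client.models.list().data:
    print(card.id)

# A request that uses a name outside this list is rejected by _check_model
# with a NOT_FOUND error response instead of reaching the engine.
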
