[V1] Refactor LLMEngine To Use Multiprocessing #9741
Closed
Changes from all commits
Commits (26), all authored by robertgshaw2-redhat:
8f8662e  prototype
01c4ca8  revert spurious 2.5 changes
1ad8a48  stash
f9084f6  cleanup
72bccd9  add MQLLMEnginev1
a6cab52  work with MQLLMEngine
885ed16  format
3ed66cf  cleanup formatting
8ae8ce9  revert exmple change
5c72515  update comment
f9b33fa  formatting
82539b9  updated
d42a54e  stash
3a2d02a  format
6028ee1  Merge branch 'main' into rs-prototype-2
6bd37c1  update
196d822  revert bind/connect
a089cd1  revert comment
974aa06  formatting
fe1e1b4  formatting tweaks
9c27fbb  move detokenizer into engine
95b5af1  format
3999279  stash
b4dd571  revert bad import
f01f992  format
be333fa  format
@@ -0,0 +1,51 @@
from dataclasses import dataclass
from typing import List, Optional, Union

import msgspec

from vllm.lora.request import LoRARequest
from vllm.sampling_params import RequestOutputKind, SamplingParams

LLM_ENGINE_CORE_READY_STR = "READY"


@dataclass
class DetokenizerRequest:

    request_id: str
    prompt: Optional[str]
    prompt_token_ids: List[int]
    skip_special_tokens: bool
    spaces_between_special_tokens: bool
    output_kind: RequestOutputKind


class EngineCoreRequest(msgspec.Struct):

    # NOTE: prompt and prompt_token_ids should be DecoderOnlyInput,
    # but this is not playing well with msgspec due to circular
    # imports and weird typing we have going on in data.py

    request_id: str
    prompt: Optional[str]
    prompt_token_ids: List[int]
    sampling_params: SamplingParams
    eos_token_id: Optional[int]
    arrival_time: float
    lora_request: Optional[LoRARequest]


@dataclass
class EngineCoreOutput:

    request_id: str
    new_token_ids: List[int]
    finished: bool
    finish_reason: Optional[str] = None
    stop_reason: Union[int, str, None] = None


class EngineCoreOutputs(msgspec.Struct):

    # [num_reqs]
    outputs: List[EngineCoreOutput]
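The request and output containers above are declared as msgspec.Struct subclasses, which suggests they are intended to be serialized cheaply when they cross the process boundary introduced by this refactor. Below is a minimal, hypothetical sketch (not code from this PR) of how a msgspec.Struct round-trips through msgpack bytes, the kind of payload that could be sent over a pipe or socket between a front-end process and an engine-core process. The ExampleRequest type and its fields are invented for illustration; the real EngineCoreRequest also carries SamplingParams and LoRARequest fields.

from typing import List, Optional

import msgspec


# Illustrative stand-in for an engine-core request (not the real class).
class ExampleRequest(msgspec.Struct):
    request_id: str
    prompt: Optional[str]
    prompt_token_ids: List[int]


encoder = msgspec.msgpack.Encoder()
decoder = msgspec.msgpack.Decoder(ExampleRequest)

req = ExampleRequest(request_id="req-0",
                     prompt="Hello",
                     prompt_token_ids=[1, 2, 3])

payload = encoder.encode(req)       # compact bytes, suitable for IPC
restored = decoder.decode(payload)  # typed ExampleRequest instance
assert restored == req

Because the decoder is constructed with the target type, deserialization produces a typed object directly rather than a generic dict, which keeps the hot path on both sides of the process boundary allocation-light.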
@@ -0,0 +1,216 @@
from dataclasses import dataclass
from typing import Dict, List, Optional

from vllm.logger import init_logger
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import RequestOutputKind
from vllm.transformers_utils.detokenizer_utils import (
    AnyTokenizer, convert_prompt_ids_to_tokens, detokenize_incrementally)
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.v1.engine import DetokenizerRequest, EngineCoreOutput

logger = init_logger(__name__)


@dataclass
class DetokenizerRequestState:

    # Generation data
    output_text: str
    tokens: List[str]
    token_ids: List[int]

    # Metadata for incremental detokenization
    prefix_offset: int
    read_offset: int

    # Parameters for detokenization
    skip_special_tokens: bool
    spaces_between_special_tokens: bool
    output_kind: RequestOutputKind

    # Request output (cached + updated incrementally)
    request_output: RequestOutput

    @classmethod
    def from_new_request(cls, tokenizer: AnyTokenizer,
                         request: DetokenizerRequest):

        tokens, prefix_offset, read_offset = convert_prompt_ids_to_tokens(
            tokenizer=tokenizer,
            prompt_ids=request.prompt_token_ids,
            skip_special_tokens=request.skip_special_tokens,
        )

        request_output = cls._initialize_request_output(
            request.request_id,
            request.prompt,
            request.prompt_token_ids,
        )

        return cls(
            output_text="",
            tokens=tokens,
            # Detokenizer mutates this list, so need a unique copy.
            token_ids=request.prompt_token_ids.copy(),
            prefix_offset=prefix_offset,
            read_offset=read_offset,
            skip_special_tokens=request.skip_special_tokens,
            spaces_between_special_tokens=request.
            spaces_between_special_tokens,
            output_kind=request.output_kind,
            request_output=request_output)

    @staticmethod
    def _initialize_request_output(
            request_id: str, prompt: str,
            prompt_token_ids: List[int]) -> RequestOutput:
        """Initialize a new RequestOutput object."""

        # TODO: Support `n` > 1.
        completion_output = CompletionOutput(
            index=0,
            text="",
            token_ids=[],
            cumulative_logprob=None,
            logprobs=None,  # TODO
            finish_reason=None,
            stop_reason=None,
            lora_request=None,
        )

        return RequestOutput(
            request_id=request_id,
            prompt=prompt,
            prompt_token_ids=prompt_token_ids,
            prompt_logprobs=None,  # TODO
            outputs=[completion_output],
            finished=False,
            metrics=None,
            lora_request=None,
            encoder_prompt=None,
            encoder_prompt_token_ids=None,
        )


class Detokenizer:

    def __init__(self, tokenizer_name: str):
        self.tokenizer = get_tokenizer(tokenizer_name)

        # Request id -> DetokenizerRequestState
        self.request_states: Dict[str, DetokenizerRequestState] = {}

    def get_num_unfinished_requests(self):
        return len(self.request_states)

    def has_unfinished_requests(self) -> bool:
        return len(self.request_states) > 0

    def add_request(self, request: DetokenizerRequest) -> None:
        """Add new request to the Detokenizer."""

        assert request.request_id not in self.request_states

        request_state = DetokenizerRequestState.from_new_request(
            self.tokenizer, request)
        self.request_states[request.request_id] = request_state

    def step(
        self, engine_core_outputs: List[EngineCoreOutput]
    ) -> List[RequestOutput]:
        """Update the detokenizer state with the new tokens from EngineCore."""

        request_outputs: List[RequestOutput] = []
        for engine_core_output in engine_core_outputs:
            request_id = engine_core_output.request_id
            request_state = self.request_states[request_id]

            # Detokenize and update state.
            self._update_request_state(
                tokenizer=self.tokenizer,
                request_state=request_state,
                new_token_ids=engine_core_output.new_token_ids,
                finished=engine_core_output.finished,
                finish_reason=engine_core_output.finish_reason,
                stop_reason=engine_core_output.stop_reason,
            )
            request_outputs.append(request_state.request_output)

            # Free completed requests.
            if engine_core_output.finished:
                self._free(request_id)

        # Send RequestOutputs to EngineClient.
        return request_outputs

    def _free(self, request_id: str) -> None:
        """Remove the request from the RequestState tracker."""

        # TODO(robertgshaw2): should this be a del?
        assert request_id in self.request_states
        self.request_states.pop(request_id)

    @staticmethod
    def _update_request_state(
        tokenizer: AnyTokenizer,
        request_state: DetokenizerRequestState,
        new_token_ids: List[int],
        finished: bool,
        finish_reason: Optional[str],
        stop_reason: Optional[str],
    ) -> None:
        """
        Update RequestState for the request_id by:
            1) Detokenize the new token ids incrementally.
            2) Update the RequestOutput with the new text.
        """

        # 1) Detokenize the new token ids incrementally.
        # TODO(woosuk): This method becomes very inefficient when the number
        # of new_token_ids is more than 1. We need to optimize this.
        decoded_text = ""
        for new_token_id in new_token_ids:
            request_state.token_ids.append(new_token_id)
            (new_tokens, new_decoded_token_text, prefix_offset,
             read_offset) = detokenize_incrementally(
                 tokenizer=tokenizer,
                 all_input_ids=request_state.token_ids,
                 prev_tokens=request_state.tokens,
                 prefix_offset=request_state.prefix_offset,
                 read_offset=request_state.read_offset,
                 skip_special_tokens=request_state.skip_special_tokens,
                 spaces_between_special_tokens=request_state.
                 spaces_between_special_tokens,
             )

            request_state.tokens.extend(new_tokens)
            request_state.prefix_offset = prefix_offset
            request_state.read_offset = read_offset
            request_state.output_text += new_decoded_token_text

            decoded_text += new_decoded_token_text

        # 2) Update the RequestOutput object with the new text.
        request_output = request_state.request_output
        completion_output = request_output.outputs[0]
        if request_state.output_kind == RequestOutputKind.CUMULATIVE:
            completion_output.text += decoded_text
            completion_output.token_ids = request_state.token_ids
        elif request_state.output_kind == RequestOutputKind.DELTA:
            completion_output.text = decoded_text
            num_prev_tokens = len(completion_output.token_ids)
            completion_output.token_ids = request_state.token_ids[
                num_prev_tokens:]
        elif request_state.output_kind == RequestOutputKind.FINAL_ONLY:
            if finished:
                completion_output.text = request_state.output_text
                completion_output.token_ids = request_state.token_ids
            else:
                completion_output.text = ""
                completion_output.token_ids = []

        if finished:
            completion_output.finish_reason = finish_reason
            completion_output.stop_reason = stop_reason
            request_output.finished = finished
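For context on how these two pieces compose, here is a hedged usage sketch rather than code from this PR: the Detokenizer is handed a DetokenizerRequest when a request arrives, and step() is then called with the EngineCoreOutput batch produced by each engine-core iteration. The sketch assumes the file above lives at vllm/v1/engine/detokenizer.py and that "gpt2" is an acceptable tokenizer name; the token ids are purely illustrative.

from vllm.sampling_params import RequestOutputKind
from vllm.v1.engine import DetokenizerRequest, EngineCoreOutput
# Assumed module path for the Detokenizer class shown above.
from vllm.v1.engine.detokenizer import Detokenizer

detokenizer = Detokenizer(tokenizer_name="gpt2")

# Register the request before any of its generated tokens arrive.
detokenizer.add_request(
    DetokenizerRequest(
        request_id="req-0",
        prompt="Hello",
        prompt_token_ids=[15496],  # illustrative prompt token ids
        skip_special_tokens=True,
        spaces_between_special_tokens=True,
        output_kind=RequestOutputKind.DELTA,
    ))

# Suppose one engine-core step produced two new tokens for this request.
outputs = detokenizer.step([
    EngineCoreOutput(request_id="req-0",
                     new_token_ids=[11, 995],  # illustrative token ids
                     finished=False),
])
print(outputs[0].outputs[0].text)  # delta text decoded this step

With RequestOutputKind.DELTA, each step() returns only the text decoded since the previous step; CUMULATIVE keeps appending, and FINAL_ONLY stays empty until the request finishes.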
Review comment (on _initialize_request_output): Make this a @classmethod for RequestOutput.
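For readers unfamiliar with the suggestion, the proposal is the alternate-classmethod-constructor pattern: the boilerplate for building a fresh RequestOutput would live on RequestOutput itself rather than in a private static helper on the detokenizer state. A minimal, self-contained sketch of the pattern on a toy class follows; the names are invented and this is not vLLM code.

from dataclasses import dataclass, field
from typing import List


@dataclass
class ToyOutput:
    request_id: str
    text: str = ""
    token_ids: List[int] = field(default_factory=list)
    finished: bool = False

    @classmethod
    def from_new_request(cls, request_id: str) -> "ToyOutput":
        # All defaults for a brand-new request are decided here, in one
        # place, instead of in every caller that needs a fresh output.
        return cls(request_id=request_id)


out = ToyOutput.from_new_request("req-0")
assert not out.finished and out.text == ""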