Skip to content

Commit

Permalink
[Bugfix] Handle <|tool_call|> token in granite tool parser (vllm-project#11039)
Browse files Browse the repository at this point in the history

Signed-off-by: Travis Johnson <[email protected]>
  • Loading branch information
tjohnson31415 authored and weilong.yu committed Dec 13, 2024
1 parent ba4d49c commit f571614
Showing 1 changed file with 3 additions and 1 deletion.
4 changes: 3 additions & 1 deletion vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,11 +35,13 @@ class GraniteToolParser(ToolParser):

def __init__(self, tokenizer: AnyTokenizer):
    """Initialize the parser and record Granite's begin-of-tool-call token."""
    super().__init__(tokenizer)
    # Granite may emit this BOT marker before the tool-call JSON list;
    # extract_tool_calls strips it via removeprefix() before parsing.
    self.bot_token = "<|tool_call|>"

def extract_tool_calls(
self, model_output: str,
request: ChatCompletionRequest) -> ExtractedToolCallInformation:
stripped = model_output.strip()
# remove whitespace and the BOT token if it exists
stripped = model_output.strip().removeprefix(self.bot_token).lstrip()
if not stripped or stripped[0] != '[':
return ExtractedToolCallInformation(tools_called=False,
tool_calls=[],
Expand Down

0 comments on commit f571614

Please sign in to comment.