diff --git a/cookbook/llms/ollama/tool_call.py b/cookbook/llms/ollama/tool_call.py
index 45cba054ff..c4fca01493 100644
--- a/cookbook/llms/ollama/tool_call.py
+++ b/cookbook/llms/ollama/tool_call.py
@@ -1,10 +1,10 @@
from phi.assistant import Assistant
from phi.tools.duckduckgo import DuckDuckGo
-from phi.llm.ollama import OllamaTools
+from phi.llm.ollama import Ollama
assistant = Assistant(
- llm=OllamaTools(model="llama3"),
+ llm=Ollama(model="llama3"),
tools=[DuckDuckGo()],
show_tool_calls=True,
)
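
For reference, a minimal way to exercise the updated cookbook script (a sketch: it assumes a local Ollama server is running with the llama3 model pulled, and mirrors the print_response pattern the other cookbook examples use):

# Sketch: run the updated example end to end against a local Ollama instance.
# Assumes `ollama serve` is running and `ollama pull llama3` has completed;
# the prompt is illustrative.
from phi.assistant import Assistant
from phi.llm.ollama import Ollama
from phi.tools.duckduckgo import DuckDuckGo

assistant = Assistant(
    llm=Ollama(model="llama3"),
    tools=[DuckDuckGo()],
    show_tool_calls=True,
)
assistant.print_response("What is happening in France this week?", markdown=True)
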
diff --git a/phi/llm/anthropic/claude_deprecated.py b/phi/llm/anthropic/claude_deprecated.py
index f0a8b5ae23..8e36a9ea66 100644
--- a/phi/llm/anthropic/claude_deprecated.py
+++ b/phi/llm/anthropic/claude_deprecated.py
@@ -82,7 +82,7 @@ def invoke(self, messages: List[Message]) -> AnthropicMessage:
return self.client.messages.create(
model=self.model,
- messages=api_messages,
+ messages=api_messages, # type: ignore
**api_kwargs,
)
@@ -98,7 +98,7 @@ def invoke_stream(self, messages: List[Message]) -> Any:
return self.client.messages.stream(
model=self.model,
- messages=api_messages,
+ messages=api_messages, # type: ignore
**api_kwargs,
)
@@ -115,7 +115,7 @@ def response(self, messages: List[Message]) -> str:
logger.debug(f"Time to generate response: {response_timer.elapsed:.4f}s")
# -*- Parse response
- response_content = response.content[0].text
+ response_content = response.content[0].text # type: ignore
# -*- Create assistant message
assistant_message = Message(
diff --git a/phi/llm/base.py b/phi/llm/base.py
index e4934c140d..1c9745c569 100644
--- a/phi/llm/base.py
+++ b/phi/llm/base.py
@@ -160,13 +160,15 @@ def run_function_calls(self, function_calls: List[FunctionCall], role: str = "to
# -*- Run function call
_function_call_timer = Timer()
_function_call_timer.start()
- function_call.execute()
+ function_call_success = function_call.execute()
_function_call_timer.stop()
+
_function_call_result = Message(
role=role,
- content=function_call.result,
+ content=function_call.result if function_call_success else function_call.error,
tool_call_id=function_call.call_id,
tool_call_name=function_call.function.name,
+ tool_call_error=not function_call_success,
metrics={"time": _function_call_timer.elapsed},
)
if "tool_call_times" not in self.metrics:
diff --git a/phi/llm/message.py b/phi/llm/message.py
index e37eb625df..35438baaf2 100644
--- a/phi/llm/message.py
+++ b/phi/llm/message.py
@@ -21,6 +21,8 @@ class Message(BaseModel):
tool_call_id: Optional[str] = None
# The name of the tool call
tool_call_name: Optional[str] = None
+ # The error of the tool call
+ tool_call_error: bool = False
# The tool calls generated by the model, such as function calls.
tool_calls: Optional[List[Dict[str, Any]]] = None
# Metrics for the message, tokes + the time it took to generate the response.
@@ -44,7 +46,9 @@ def get_content_string(self) -> str:
return ""
def to_dict(self) -> Dict[str, Any]:
- _dict = self.model_dump(exclude_none=True, exclude={"metrics", "tool_call_name", "internal_id"})
+ _dict = self.model_dump(
+ exclude_none=True, exclude={"metrics", "tool_call_name", "internal_id", "tool_call_error"}
+ )
# Manually add the content field if it is None
if self.content is None:
_dict["content"] = None
diff --git a/phi/llm/mistral/mistral.py b/phi/llm/mistral/mistral.py
index 4ca79a2236..46afa12586 100644
--- a/phi/llm/mistral/mistral.py
+++ b/phi/llm/mistral/mistral.py
@@ -167,7 +167,11 @@ def response(self, messages: List[Message]) -> str:
)
continue
if _function_call.error is not None:
- messages.append(Message(role="tool", tool_call_id=_tool_call_id, content=_function_call.error))
+ messages.append(
+ Message(
+ role="tool", tool_call_id=_tool_call_id, tool_call_error=True, content=_function_call.error
+ )
+ )
continue
function_calls_to_run.append(_function_call)
@@ -259,7 +263,11 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
)
continue
if _function_call.error is not None:
- messages.append(Message(role="tool", tool_call_id=_tool_call_id, content=_function_call.error))
+ messages.append(
+ Message(
+ role="tool", tool_call_id=_tool_call_id, tool_call_error=True, content=_function_call.error
+ )
+ )
continue
function_calls_to_run.append(_function_call)
diff --git a/phi/llm/ollama/chat.py b/phi/llm/ollama/chat.py
index 57d33aa96e..38e749f58c 100644
--- a/phi/llm/ollama/chat.py
+++ b/phi/llm/ollama/chat.py
@@ -4,6 +4,7 @@
from phi.llm.base import LLM
from phi.llm.message import Message
+from phi.llm.ollama.utils import extract_tool_calls
from phi.tools.function import FunctionCall
from phi.utils.log import logger
from phi.utils.timer import Timer
@@ -27,7 +28,7 @@ class Ollama(LLM):
client_kwargs: Optional[Dict[str, Any]] = None
ollama_client: Optional[OllamaClient] = None
# Maximum number of function calls allowed across all iterations.
- function_call_limit: int = 5
+ function_call_limit: int = 10
# Deactivate tool calls after 1 tool call
deactivate_tools_after_use: bool = False
# After a tool call is run, add the user message as a reminder to the LLM
@@ -87,14 +88,14 @@ def to_llm_message(self, message: Message) -> Dict[str, Any]:
def invoke(self, messages: List[Message]) -> Mapping[str, Any]:
return self.client.chat(
model=self.model,
- messages=[self.to_llm_message(m) for m in messages],
+ messages=[self.to_llm_message(m) for m in messages], # type: ignore
**self.api_kwargs,
- )
+ ) # type: ignore
def invoke_stream(self, messages: List[Message]) -> Iterator[Mapping[str, Any]]:
yield from self.client.chat(
model=self.model,
- messages=[self.to_llm_message(m) for m in messages],
+ messages=[self.to_llm_message(m) for m in messages], # type: ignore
stream=True,
**self.api_kwargs,
) # type: ignore
@@ -128,34 +129,39 @@ def response(self, messages: List[Message]) -> str:
role=response_role or "assistant",
content=response_content,
)
+
# Check if the response is a tool call
try:
if response_content is not None:
_tool_call_content = response_content.strip()
- if _tool_call_content.startswith("{") and _tool_call_content.endswith("}"):
- _tool_call_content_json = json.loads(_tool_call_content)
- if "tool_calls" in _tool_call_content_json:
- assistant_tool_calls = _tool_call_content_json.get("tool_calls")
- if isinstance(assistant_tool_calls, list):
- # Build tool calls
- tool_calls: List[Dict[str, Any]] = []
- logger.debug(f"Building tool calls from {assistant_tool_calls}")
- for tool_call in assistant_tool_calls:
- tool_call_name = tool_call.get("name")
- tool_call_args = tool_call.get("arguments")
- _function_def = {"name": tool_call_name}
- if tool_call_args is not None:
- _function_def["arguments"] = json.dumps(tool_call_args)
- tool_calls.append(
- {
- "type": "function",
- "function": _function_def,
- }
- )
- assistant_message.tool_calls = tool_calls
- assistant_message.role = "assistant"
+ assistant_tool_calls = extract_tool_calls(_tool_call_content)
+
+ if assistant_tool_calls.invalid_json_format:
+ assistant_message.tool_call_error = True
+
+ if assistant_tool_calls.tool_calls is not None:
+ # Build tool calls
+ tool_calls: List[Dict[str, Any]] = []
+ logger.debug(f"Building tool calls from {assistant_tool_calls}")
+ for tool_call in assistant_tool_calls.tool_calls:
+ tool_call_name = tool_call.get("name")
+ tool_call_args = tool_call.get("arguments")
+ _function_def = {"name": tool_call_name}
+ if tool_call_args is not None:
+ _function_def["arguments"] = json.dumps(tool_call_args)
+ tool_calls.append(
+ {
+ "type": "function",
+ "function": _function_def,
+ }
+ )
+
+ # Add tool calls to assistant message
+ assistant_message.tool_calls = tool_calls
+ assistant_message.role = "assistant"
except Exception:
logger.warning(f"Could not parse tool calls from response: {response_content}")
+ assistant_message.tool_call_error = True
pass
# -*- Update usage metrics
@@ -182,8 +188,16 @@ def response(self, messages: List[Message]) -> str:
assistant_message.log()
# -*- Parse and run function call
- if assistant_message.tool_calls is not None and self.run_tools:
- final_response = ""
+ final_response = ""
+ if assistant_message.tool_call_error:
+ # Add error message to the messages to let the LLM know that the tool call failed
+ messages = self.add_tool_call_error_message(messages)
+
+ # -*- Yield new response using results of tool calls
+ final_response += self.response(messages=messages)
+ return final_response
+
+ elif assistant_message.tool_calls is not None and self.run_tools:
function_calls_to_run: List[FunctionCall] = []
for tool_call in assistant_message.tool_calls:
_function_call = get_function_call_for_tool_call(tool_call, self.functions)
@@ -205,11 +219,20 @@ def response(self, messages: List[Message]) -> str:
final_response += "\n\n"
function_call_results = self.run_function_calls(function_calls_to_run, role="user")
- if len(function_call_results) > 0:
+
+ # This case rarely happens, but it should be handled
+ if len(function_calls_to_run) != len(function_call_results):
+ return final_response + self.response(messages=messages)
+
+ # Add results of the function calls to the messages
+ elif len(function_call_results) > 0:
messages.extend(function_call_results)
# Reconfigure messages so the LLM is reminded of the original task
if self.add_user_message_after_tool_call:
- messages = self.add_original_user_message(messages)
+ if any(item.tool_call_error for item in function_call_results):
+ messages = self.add_tool_call_error_message(messages)
+ else:
+ messages = self.add_original_user_message(messages)
# Deactivate tool calls by turning off JSON mode after 1 tool call
if self.deactivate_tools_after_use:
@@ -218,10 +241,13 @@ def response(self, messages: List[Message]) -> str:
# -*- Yield new response using results of tool calls
final_response += self.response(messages=messages)
return final_response
+
logger.debug("---------- Ollama Response End ----------")
+
# -*- Return content if no function calls are present
if assistant_message.content is not None:
return assistant_message.get_content_string()
+
return "Something went wrong, please try again."
def response_stream(self, messages: List[Message]) -> Iterator[str]:
@@ -239,6 +265,7 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
response_metrics: Mapping[str, Any] = {}
response_timer = Timer()
response_timer.start()
+
for response in self.invoke_stream(messages=messages):
completion_tokens += 1
if completion_tokens == 1:
@@ -257,8 +284,10 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
assistant_message_content += response_content
# Strip out tool calls from the response
- # If the response is a tool call, it will start with a {
- if not response_is_tool_call and assistant_message_content.strip().startswith("{"):
+ extract_tool_calls_result = extract_tool_calls(assistant_message_content)
+ if not response_is_tool_call and (
+ extract_tool_calls_result.tool_calls is not None or extract_tool_calls_result.invalid_json_format
+ ):
response_is_tool_call = True
# If the response is a tool call, count the number of brackets
@@ -299,33 +328,38 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
role="assistant",
content=assistant_message_content,
)
+
# Check if the response is a tool call
try:
if response_is_tool_call and assistant_message_content != "":
_tool_call_content = assistant_message_content.strip()
- if _tool_call_content.startswith("{") and _tool_call_content.endswith("}"):
- _tool_call_content_json = json.loads(_tool_call_content)
- if "tool_calls" in _tool_call_content_json:
- assistant_tool_calls = _tool_call_content_json.get("tool_calls")
- if isinstance(assistant_tool_calls, list):
- # Build tool calls
- tool_calls: List[Dict[str, Any]] = []
- logger.debug(f"Building tool calls from {assistant_tool_calls}")
- for tool_call in assistant_tool_calls:
- tool_call_name = tool_call.get("name")
- tool_call_args = tool_call.get("arguments")
- _function_def = {"name": tool_call_name}
- if tool_call_args is not None:
- _function_def["arguments"] = json.dumps(tool_call_args)
- tool_calls.append(
- {
- "type": "function",
- "function": _function_def,
- }
- )
- assistant_message.tool_calls = tool_calls
+ assistant_tool_calls = extract_tool_calls(_tool_call_content)
+
+ if assistant_tool_calls.invalid_json_format:
+ assistant_message.tool_call_error = True
+
+ if not assistant_message.tool_call_error and assistant_tool_calls.tool_calls is not None:
+ # Build tool calls
+ tool_calls: List[Dict[str, Any]] = []
+ logger.debug(f"Building tool calls from {assistant_tool_calls.tool_calls}")
+ for tool_call in assistant_tool_calls.tool_calls:
+ tool_call_name = tool_call.get("name")
+ tool_call_args = tool_call.get("arguments")
+ _function_def = {"name": tool_call_name}
+ if tool_call_args is not None:
+ _function_def["arguments"] = json.dumps(tool_call_args)
+ tool_calls.append(
+ {
+ "type": "function",
+ "function": _function_def,
+ }
+ )
+
+ # Add tool calls to assistant message
+ assistant_message.tool_calls = tool_calls
except Exception:
logger.warning(f"Could not parse tool calls from response: {assistant_message_content}")
+ assistant_message.tool_call_error = True
pass
# -*- Update usage metrics
@@ -364,7 +398,14 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
assistant_message.log()
# -*- Parse and run function call
- if assistant_message.tool_calls is not None and self.run_tools:
+ if assistant_message.tool_call_error:
+ # Add error message to the messages to let the LLM know that the tool call failed
+ messages = self.add_tool_call_error_message(messages)
+
+ # -*- Yield new response using results of tool calls
+ yield from self.response_stream(messages=messages)
+
+ elif assistant_message.tool_calls is not None and self.run_tools:
function_calls_to_run: List[FunctionCall] = []
for tool_call in assistant_message.tool_calls:
_function_call = get_function_call_for_tool_call(tool_call, self.functions)
@@ -386,12 +427,21 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
yield "\n\n"
function_call_results = self.run_function_calls(function_calls_to_run, role="user")
+
+ # This case rarely happens, but it should be handled
+ if len(function_calls_to_run) != len(function_call_results):
+ messages = self.add_tool_call_error_message(messages)
+
# Add results of the function calls to the messages
- if len(function_call_results) > 0:
+ elif len(function_call_results) > 0:
messages.extend(function_call_results)
+
# Reconfigure messages so the LLM is reminded of the original task
if self.add_user_message_after_tool_call:
- messages = self.add_original_user_message(messages)
+ if any(item.tool_call_error for item in function_call_results):
+ messages = self.add_tool_call_error_message(messages)
+ else:
+ messages = self.add_original_user_message(messages)
# Deactivate tool calls by turning off JSON mode after 1 tool call
if self.deactivate_tools_after_use:
@@ -399,6 +449,7 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
# -*- Yield new response using results of tool calls
yield from self.response_stream(messages=messages)
+
logger.debug("---------- Ollama Response End ----------")
def add_original_user_message(self, messages: List[Message]) -> List[Message]:
@@ -408,34 +459,82 @@ def add_original_user_message(self, messages: List[Message]) -> List[Message]:
if m.role == "user":
original_user_message_content = m.content
break
+
if original_user_message_content is not None:
_content = (
- "Using the results of the tools above, respond to the following message:"
+ "Using the results of the tools above, respond to the following message. "
+ "If the user explicitly requests raw data or specific formats like JSON, provide it as requested. "
+ "Otherwise, use the tool results to provide a clear and relevant answer without "
+ "returning the raw results directly:"
f"\n\n\n{original_user_message_content}\n"
)
+
messages.append(Message(role="user", content=_content))
return messages
+ def add_tool_call_error_message(self, messages: List[Message]) -> List[Message]:
+ # Add error message to the messages to let the LLM know that the tool call failed
+ content = (
+ "Output from the tool indicates an arguments error, take a step back and adjust the tool arguments "
+ "then use the same tool again with the new arguments. "
+ "Ensure the response does not mention any failed tool calls, Just the adjusted tool calls."
+ )
+ messages.append(Message(role="user", tool_call_error=True, content=content))
+ return messages
+
def get_instructions_to_generate_tool_calls(self) -> List[str]:
if self.functions is not None:
return [
"To respond to the users message, you can use one or more of the tools provided above.",
+ # Tool usage instructions
"If you decide to use a tool, you must respond in the JSON format matching the following schema:\n"
+ dedent(
"""\
{
- "tool_calls": [{
- "name": "",
- "arguments": ",
+ "arguments":
+ }
+ ]
}\
"""
),
- "To use a tool, just respond with the JSON matching the schema. Nothing else. Do not add any additional notes or explanations",
- "After you use a tool, the next message you get will contain the result of the tool call.",
- "REMEMBER: To use a tool, you must respond only in JSON format.",
- "After you use a tool and receive the result back, respond regularly to answer the users question.",
+ "REMEMBER: To use a tool, you MUST respond ONLY in JSON format.",
+ (
+ "REMEMBER: You can use multiple tools in a single response if necessary, "
+ 'by including multiple entries in the "tool_calls" array.'
+ ),
+ "You may use the same tool multiple times in a single response, but only with different arguments.",
+ (
+ "To use a tool, ONLY respond with the JSON matching the schema. Nothing else. "
+ "Do not add any additional notes or explanations"
+ ),
+ (
+ "REMEMBER: The ONLY valid way to use this tool is to ensure the ENTIRE response is in JSON format, "
+ "matching the specified schema."
+ ),
+ "Do not inform the user that you used a tool in your response.",
+ "Do not suggest tools to use in your responses. You should use them to obtain answers.",
+ "Ensure each tool use is formatted correctly and independently.",
+ 'REMEMBER: The "arguments" field must contain valid parameters as per the tool\'s JSON schema.',
+ "Ensure accuracy by using tools to obtain your answers, avoiding assumptions about tool output.",
+ # Response instructions
+ "After you use a tool, the next message you get will contain the result of the tool use.",
+ "If the result of one tool requires using another tool, use needed tool first and then use the result.",
+ (
+ "If the result from a tool indicates an input error, "
+ "You must adjust the parameters and try use the tool again."
+ ),
+ (
+ "If the tool results are used in your response, you do not need to mention the knowledge cutoff. "
+ "Use the information directly from the tool's output, which is assumed to be up-to-date."
+ ),
+ (
+ "After you use a tool and receive the result back, take a step back and provide clear and relevant "
+ "answers based on the user's query and tool results."
+ ),
]
return []
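
Taken together, the changes to response() and response_stream() make malformed tool-call JSON self-correcting: the error is flagged on the assistant message, a corrective user message is appended, and the method recurses. A condensed control-flow sketch (pseudocode distilled from the hunks above; get_assistant_message and resolve_function_calls are hypothetical helpers, and metrics, call limits, and the results-length check are omitted):

def response_sketch(llm, messages):
    assistant_message = get_assistant_message(llm, messages)  # hypothetical helper

    if assistant_message.tool_call_error:
        # Malformed tool-call JSON: tell the model to adjust and retry.
        messages = llm.add_tool_call_error_message(messages)
        return response_sketch(llm, messages)

    if assistant_message.tool_calls is not None and llm.run_tools:
        calls = resolve_function_calls(assistant_message.tool_calls)  # hypothetical helper
        results = llm.run_function_calls(calls, role="user")
        messages.extend(results)
        if llm.add_user_message_after_tool_call:
            if any(m.tool_call_error for m in results):
                messages = llm.add_tool_call_error_message(messages)
            else:
                messages = llm.add_original_user_message(messages)
        return response_sketch(llm, messages)

    return assistant_message.get_content_string()
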
diff --git a/phi/llm/ollama/hermes.py b/phi/llm/ollama/hermes.py
index 7a66067a76..aa5b786477 100644
--- a/phi/llm/ollama/hermes.py
+++ b/phi/llm/ollama/hermes.py
@@ -89,14 +89,14 @@ def to_llm_message(self, message: Message) -> Dict[str, Any]:
def invoke(self, messages: List[Message]) -> Mapping[str, Any]:
return self.client.chat(
model=self.model,
- messages=[self.to_llm_message(m) for m in messages],
+ messages=[self.to_llm_message(m) for m in messages], # type: ignore
**self.api_kwargs,
- )
+ ) # type: ignore
def invoke_stream(self, messages: List[Message]) -> Iterator[Mapping[str, Any]]:
yield from self.client.chat(
model=self.model,
- messages=[self.to_llm_message(m) for m in messages],
+ messages=[self.to_llm_message(m) for m in messages], # type: ignore
stream=True,
**self.api_kwargs,
) # type: ignore
@@ -194,7 +194,7 @@ def response(self, messages: List[Message]) -> str:
messages.append(Message(role="user", content="Could not find function to call."))
continue
if _function_call.error is not None:
- messages.append(Message(role="user", content=_function_call.error))
+ messages.append(Message(role="user", tool_call_error=True, content=_function_call.error))
continue
function_calls_to_run.append(_function_call)
diff --git a/phi/llm/ollama/tools.py b/phi/llm/ollama/tools.py
index 1fe5f872ce..6c7ef683ed 100644
--- a/phi/llm/ollama/tools.py
+++ b/phi/llm/ollama/tools.py
@@ -90,14 +90,14 @@ def to_llm_message(self, message: Message) -> Dict[str, Any]:
def invoke(self, messages: List[Message]) -> Mapping[str, Any]:
return self.client.chat(
model=self.model,
- messages=[self.to_llm_message(m) for m in messages],
+ messages=[self.to_llm_message(m) for m in messages], # type: ignore
**self.api_kwargs,
- )
+ ) # type: ignore
def invoke_stream(self, messages: List[Message]) -> Iterator[Mapping[str, Any]]:
yield from self.client.chat(
model=self.model,
- messages=[self.to_llm_message(m) for m in messages],
+ messages=[self.to_llm_message(m) for m in messages], # type: ignore
stream=True,
**self.api_kwargs,
) # type: ignore
@@ -195,7 +195,7 @@ def response(self, messages: List[Message]) -> str:
messages.append(Message(role="user", content="Could not find function to call."))
continue
if _function_call.error is not None:
- messages.append(Message(role="user", content=_function_call.error))
+ messages.append(Message(role="user", tool_call_error=True, content=_function_call.error))
continue
function_calls_to_run.append(_function_call)
diff --git a/phi/llm/ollama/utils.py b/phi/llm/ollama/utils.py
new file mode 100644
index 0000000000..2ee3358b2b
--- /dev/null
+++ b/phi/llm/ollama/utils.py
@@ -0,0 +1,85 @@
+import json
+from typing import Optional, Dict, Literal, Union
+
+from pydantic import BaseModel
+
+
+class MessageToolCallExtractionResult(BaseModel):
+ tool_calls: Optional[list] = None
+ invalid_json_format: bool = False
+
+
+def extract_json(s: str) -> Union[Optional[Dict], Literal[False]]:
+ """
+ Extracts all valid JSON objects from a string, combines them, and returns the result as a dictionary.
+
+ Args:
+ s: The string to extract JSON from.
+
+ Returns:
+ A dictionary containing the combined JSON, None if no JSON was found, or False if invalid JSON was found.
+ """
+ json_objects = []
+ start_idx = 0
+
+ while start_idx < len(s):
+ # Find the next '{' which indicates the start of a JSON block
+ json_start = s.find("{", start_idx)
+ if json_start == -1:
+ break # No more JSON objects found
+
+ # Find the matching '}' for the found '{'
+ stack = []
+ i = json_start
+ while i < len(s):
+ if s[i] == "{":
+ stack.append("{")
+ elif s[i] == "}":
+ if stack:
+ stack.pop()
+ if not stack:
+ json_end = i
+ break
+ i += 1
+ else:
+ return False
+
+ json_str = s[json_start : json_end + 1]
+ try:
+ json_obj = json.loads(json_str)
+ json_objects.append(json_obj)
+ except ValueError:
+ return False
+
+ start_idx = json_end + 1
+
+ if not json_objects:
+ return None
+
+ # Combine all JSON objects into one
+ combined_json = {}
+ for obj in json_objects:
+ for key, value in obj.items():
+ if key not in combined_json:
+ combined_json[key] = value
+ elif isinstance(value, list) and isinstance(combined_json[key], list):
+ combined_json[key].extend(value)
+
+ return combined_json
+
+
+def extract_tool_calls(assistant_msg_content: str) -> MessageToolCallExtractionResult:
+ json_obj = extract_json(assistant_msg_content)
+ if json_obj is None:
+ return MessageToolCallExtractionResult()
+
+ if json_obj is False or not isinstance(json_obj, dict):
+ return MessageToolCallExtractionResult(invalid_json_format=True)
+
+ tool_calls: Optional[list] = json_obj.get("tool_calls")
+
+ # Not a tool-call JSON object
+ if not isinstance(tool_calls, list):
+ return MessageToolCallExtractionResult(invalid_json_format=True)
+
+ return MessageToolCallExtractionResult(tool_calls=tool_calls)
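
A few illustrative calls against the new helpers, covering the three outcomes the parser distinguishes (behavior traced from the code above):

from phi.llm.ollama.utils import extract_tool_calls

# 1. A valid tool-call payload is extracted, even with surrounding prose:
ok = extract_tool_calls('Sure! {"tool_calls": [{"name": "search", "arguments": {"q": "phi"}}]}')
assert ok.tool_calls == [{"name": "search", "arguments": {"q": "phi"}}]
assert ok.invalid_json_format is False

# 2. No JSON at all: neither tool calls nor an error flag.
plain = extract_tool_calls("The capital of France is Paris.")
assert plain.tool_calls is None and plain.invalid_json_format is False

# 3. Unbalanced braces, or JSON without a "tool_calls" list, set the error flag:
bad = extract_tool_calls('{"tool_calls": [')
assert bad.invalid_json_format is True
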
diff --git a/phi/llm/openai/chat.py b/phi/llm/openai/chat.py
index 95645288b9..43479cefd7 100644
--- a/phi/llm/openai/chat.py
+++ b/phi/llm/openai/chat.py
@@ -246,7 +246,7 @@ def run_function(self, function_call: Dict[str, Any]) -> Tuple[Message, Optional
if _function_call is None:
return Message(role="function", content="Could not find function to call."), None
if _function_call.error is not None:
- return Message(role="function", content=_function_call.error), _function_call
+ return Message(role="function", tool_call_error=True, content=_function_call.error), _function_call
if self.function_call_stack is None:
self.function_call_stack = []
@@ -263,12 +263,13 @@ def run_function(self, function_call: Dict[str, Any]) -> Tuple[Message, Optional
self.function_call_stack.append(_function_call)
_function_call_timer = Timer()
_function_call_timer.start()
- _function_call.execute()
+ function_call_success = _function_call.execute()
_function_call_timer.stop()
_function_call_message = Message(
role="function",
name=_function_call.function.name,
- content=_function_call.result,
+ content=_function_call.result if function_call_success else _function_call.error,
+ tool_call_error=not function_call_success,
metrics={"time": _function_call_timer.elapsed},
)
if "function_call_times" not in self.metrics:
diff --git a/phi/llm/together/together.py b/phi/llm/together/together.py
index eae8e82fac..eb4c04aca4 100644
--- a/phi/llm/together/together.py
+++ b/phi/llm/together/together.py
@@ -124,7 +124,11 @@ def response_stream(self, messages: List[Message]) -> Iterator[str]:
)
continue
if _function_call.error is not None:
- messages.append(Message(role="tool", tool_call_id=_tool_call_id, content=_function_call.error))
+ messages.append(
+ Message(
+ role="tool", tool_call_id=_tool_call_id, tool_call_error=True, content=_function_call.error
+ )
+ )
continue
function_calls_to_run.append(_function_call)
diff --git a/phi/tools/function.py b/phi/tools/function.py
index 71ba067acb..835876ad02 100644
--- a/phi/tools/function.py
+++ b/phi/tools/function.py
@@ -143,7 +143,7 @@ def execute(self) -> bool:
except Exception as e:
logger.warning(f"Could not run function {self.get_call_str()}")
logger.exception(e)
- self.result = str(e)
+ self.error = str(e)
return False
try:
@@ -152,5 +152,5 @@ def execute(self) -> bool:
except Exception as e:
logger.warning(f"Could not run function {self.get_call_str()}")
logger.exception(e)
- self.result = str(e)
+ self.error = str(e)
return False
diff --git a/phi/utils/functions.py b/phi/utils/functions.py
index 6a423b85be..953f684023 100644
--- a/phi/utils/functions.py
+++ b/phi/utils/functions.py
@@ -37,7 +37,10 @@ def get_function_call(
_arguments = json.loads(arguments)
except Exception as e:
logger.error(f"Unable to decode function arguments:\n{arguments}\nError: {e}")
- function_call.error = f"Error while decoding function arguments: {e}\n\n Please make sure we can json.loads() the arguments and retry."
+ function_call.error = (
+ f"Error while decoding function arguments: {e}\n\n"
+ f"Please make sure we can json.loads() the arguments and retry."
+ )
return function_call
if not isinstance(_arguments, dict):
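
For context, the reworded message covers the case where the model emits arguments that json.loads() rejects; a tiny self-contained reproduction of the failure and the resulting error string (a sketch, not the phi code path):

import json

arguments = '{"city": "Paris",}'  # trailing comma: invalid JSON from the model
try:
    json.loads(arguments)
except Exception as e:
    function_call_error = (
        f"Error while decoding function arguments: {e}\n\n"
        "Please make sure we can json.loads() the arguments and retry."
    )
    print(function_call_error)
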
diff --git a/pyproject.toml b/pyproject.toml
index 11aa48743d..f82d51d36a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "phidata"
-version = "2.4.24"
+version = "2.4.25"
description = "Memory, knowledge and tools for LLMs."
requires-python = ">=3.7"
readme = "README.md"