Update Agent class
ashpreetbedi committed Sep 26, 2024
1 parent f005280 commit 80ae78d
Showing 5 changed files with 278 additions and 191 deletions.
434 changes: 248 additions & 186 deletions phi/agent/agent.py

Large diffs are not rendered by default.

13 changes: 12 additions & 1 deletion phi/agent/response.py
@@ -1,11 +1,21 @@
 from time import time
+from enum import Enum
 from typing import Optional, Any, Dict, List
 
 from pydantic import BaseModel, ConfigDict, Field
 
 from phi.model.message import Message, MessageContext
 
 
+class RunEvent(str, Enum):
+    """Events that can be sent by the Agent.run() method"""
+
+    run_start = "RunStart"
+    intermediate_step = "IntermediateStep"
+    agent_response = "AgentResponse"
+    run_end = "RunEnd"
+
+
 class RunResponse(BaseModel):
     """Response returned by Agent.run()"""
 
@@ -17,6 +27,7 @@ class RunResponse(BaseModel):
     tools: Optional[List[Dict[str, Any]]] = None
     context: Optional[List[MessageContext]] = None
     model: Optional[str] = None
+    event: RunEvent = RunEvent.agent_response
     created_at: int = Field(default_factory=lambda: int(time()))
 
-    model_config = ConfigDict(arbitrary_types_allowed=True)
+    model_config = ConfigDict(arbitrary_types_allowed=True, use_enum_values=True)
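The new event field lets callers tell lifecycle notifications apart from model output when Agent.run() streams RunResponse objects. Below is a minimal consumer sketch; it assumes Agent.run(stream=True) yields RunResponse instances with a content field, and only RunEvent and the event field are actually confirmed by this diff.

    from typing import Iterable
    from phi.agent.response import RunEvent, RunResponse

    def print_agent_output(responses: Iterable[RunResponse]) -> None:
        # With use_enum_values=True, `event` is stored as its string value,
        # but comparing against the str-based Enum member still works.
        for response in responses:
            if response.event == RunEvent.agent_response:  # "AgentResponse"
                print(response.content, end="", flush=True)  # `content` assumed, not shown in this hunk
            elif response.event == RunEvent.run_end:  # "RunEnd"
                print()  # finish the line once the run completes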
4 changes: 3 additions & 1 deletion phi/memory/agent.py
@@ -23,8 +23,10 @@ class AgentMemory(BaseModel):
     # Messages sent to the Model and the Model responses.
     run_messages: List[Message] = []
 
-    # Create personalized memories for this user
+    # Create personalized memories for a user
+    # MemoryDb to store the memories
     db: Optional[MemoryDb] = None
+    # User ID for the memory
     user_id: Optional[str] = None
     retrieval: MemoryRetrieval = MemoryRetrieval.last_n
     memories: Optional[List[Memory]] = None
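With a MemoryDb and a user_id attached, AgentMemory can persist personalized memories per user. A minimal wiring sketch follows; the PgMemoryDb backend, its import path, and its constructor arguments are assumptions drawn from the wider phidata codebase, not from this diff.

    from phi.memory.agent import AgentMemory
    from phi.memory.db.postgres import PgMemoryDb  # assumed import path

    memory = AgentMemory(
        # Assumed backend and signature; any MemoryDb implementation should work.
        db=PgMemoryDb(table_name="agent_memories", db_url="postgresql+psycopg://..."),
        user_id="user_123",  # memories are stored and retrieved for this user
    )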
4 changes: 4 additions & 0 deletions phi/model/base.py
@@ -54,6 +54,10 @@ class Model(BaseModel):
 
     # Agent Session ID
     session_id: Optional[str] = None
+    # Whether to use the structured outputs from the Model.
+    structured_outputs: Optional[bool] = None
+    # Whether the Model supports structured outputs.
+    supports_structured_outputs: bool = False
 
     model_config = ConfigDict(arbitrary_types_allowed=True, populate_by_name=True)
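
Together, the two flags give the Agent a model-agnostic way to decide whether to request structured outputs. The check below is only a sketch of that gating logic; the real decision lives in the unrendered phi/agent/agent.py diff.

    from phi.model.base import Model

    def should_use_structured_outputs(model: Model) -> bool:
        # Opt in only when the user requested structured outputs and the
        # concrete Model subclass advertises support for them.
        return bool(model.structured_outputs) and model.supports_structured_outputs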

14 changes: 11 additions & 3 deletions phi/model/openai/chat.py
@@ -46,15 +46,15 @@ class OpenAIChat(Model):
     including sending requests, handling responses and running tool calls.
 
     Attributes:
-        model (str): The name of the OpenAI model to use. Default is "gpt-4o".
+        id (str): The id of the OpenAI model to use. Default is "gpt-4o".
         name (str): The name of this chat model instance. Default is "OpenAIChat".
         provider (str): The provider of the model. Default is "OpenAI".
         frequency_penalty (Optional[float]): Penalizes new tokens based on their frequency in the text so far.
         logit_bias (Optional[Any]): Modifies the likelihood of specified tokens appearing in the completion.
         logprobs (Optional[bool]): Include the log probabilities on the logprobs most likely tokens.
         max_tokens (Optional[int]): The maximum number of tokens to generate in the chat completion.
         presence_penalty (Optional[float]): Penalizes new tokens based on whether they appear in the text so far.
-        response_format (Optional[Dict[str, Any]]): Specifies the format in which the model should return its response.
+        response_format (Optional[Any]): An object specifying the format that the model must output.
         seed (Optional[int]): A seed for deterministic sampling.
         stop (Optional[Union[str, List[str]]]): Up to 4 sequences where the API will stop generating further tokens.
         temperature (Optional[float]): Controls randomness in the model's output.
@@ -75,6 +75,8 @@ class OpenAIChat(Model):
         client_params (Optional[Dict[str, Any]]): Additional parameters for client configuration.
         client (Optional[OpenAIClient]): The OpenAI client instance.
         async_client (Optional[AsyncOpenAIClient]): The asynchronous OpenAI client instance.
+        structured_outputs (bool): Whether to use the structured outputs from the Model. Default is False.
+        supports_structured_outputs (bool): Whether the Model supports structured outputs. Default is True.
     """
 
     id: str = "gpt-4o"
@@ -87,7 +89,7 @@ class OpenAIChat(Model):
     logprobs: Optional[bool] = None
     max_tokens: Optional[int] = None
     presence_penalty: Optional[float] = None
-    response_format: Optional[Dict[str, Any]] = None
+    response_format: Optional[Any] = None
     seed: Optional[int] = None
     stop: Optional[Union[str, List[str]]] = None
     temperature: Optional[float] = None
@@ -113,6 +115,12 @@ class OpenAIChat(Model):
     client: Optional[OpenAIClient] = None
     async_client: Optional[AsyncOpenAIClient] = None
 
+    # Internal parameters: not used for API requests
+    # Whether to use the structured outputs from the Model.
+    structured_outputs: bool = False
+    # Whether the Model supports structured outputs.
+    supports_structured_outputs: bool = True
+
     def get_client(self) -> OpenAIClient:
         """
         Get or create an OpenAI client.
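
Loosening response_format from Optional[Dict[str, Any]] to Optional[Any] means a Pydantic model class can be passed straight through, which is what OpenAI's structured-outputs endpoint accepts. A usage sketch, assuming only the fields shown above; how the Agent actually consumes response_format is not visible in this commit.

    from pydantic import BaseModel
    from phi.model.openai.chat import OpenAIChat  # module path taken from the diff

    class MovieScript(BaseModel):
        title: str
        genre: str

    # structured_outputs=True opts this instance in; OpenAIChat already reports
    # supports_structured_outputs=True, so the Agent can route the request to
    # OpenAI's structured-outputs (parsed) API.
    model = OpenAIChat(
        id="gpt-4o",
        structured_outputs=True,
        response_format=MovieScript,  # a class is now legal: Optional[Any]
    )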
