diff --git a/phi/assistant/assistant.py b/phi/assistant/assistant.py
index 3e36335f2..606eae28a 100644
--- a/phi/assistant/assistant.py
+++ b/phi/assistant/assistant.py
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional, Dict, List
 
 from phi.task.task import Task
 from phi.task.llm import LLMTask
@@ -9,12 +9,17 @@ class Assistant(LLMTask):
     name: str = "assistant"
     description: Optional[str] = None
 
-    def get_delegation_function(self, task: Task) -> Function:
+    def get_delegation_function(
+        self, task: Task, assistant_responses: Optional[Dict[str, List[str]]] = None
+    ) -> Function:
         # Update assistant task
         self.conversation_id = task.conversation_id
         self.conversation_memory = task.conversation_memory
         self.conversation_message = task.conversation_message
         self.conversation_tasks = task.conversation_tasks
+        self.conversation_responses = task.conversation_responses
+        self.conversation_response_iterator = task.conversation_response_iterator
+        self.parse_output = False
 
         # Prepare the delegation function
         f_name = f"run_{self.name}"
@@ -34,7 +39,14 @@ def get_delegation_function(self, task: Task) -> Function:
         """
 
         def delegation_function(task_description: str):
-            return self.run(message=task_description, stream=False)
+            assistant_response = self.run(message=task_description, stream=False)
+
+            if self.show_output and assistant_responses is not None:
+                if self.__class__.__name__ not in assistant_responses:
+                    assistant_responses[self.__class__.__name__] = []
+                assistant_responses[self.__class__.__name__].append(assistant_response)  # type: ignore
+
+            return assistant_response
 
         _f = Function.from_callable(delegation_function)
         _f.name = f_name
diff --git a/phi/assistant/openai/__init__.py b/phi/assistant/openai/__init__.py
index 96b6f9a71..5ce7a606b 100644
--- a/phi/assistant/openai/__init__.py
+++ b/phi/assistant/openai/__init__.py
@@ -1 +1 @@
-from phi.assistant.openai.assistant import OpenAiAssistant
+from phi.assistant.openai.assistant import OpenAIAssistant
diff --git a/phi/assistant/openai/assistant.py b/phi/assistant/openai/assistant.py
index a675f168c..a5993e17d 100644
--- a/phi/assistant/openai/assistant.py
+++ b/phi/assistant/openai/assistant.py
@@ -18,13 +18,13 @@
     raise
 
 
-class OpenAiAssistant(BaseModel):
+class OpenAIAssistant(BaseModel):
     # -*- LLM settings
     model: str = "gpt-4-1106-preview"
     openai: Optional[OpenAI] = None
 
-    # -*- OpenAiAssistant settings
-    # OpenAiAssistant id which can be referenced in API endpoints.
+    # -*- OpenAIAssistant settings
+    # OpenAIAssistant id which can be referenced in API endpoints.
     id: Optional[str] = None
     # The object type, populated by the API. Always assistant.
     object: Optional[str] = None
@@ -35,15 +35,15 @@ class OpenAiAssistant(BaseModel):
     # The system instructions that the assistant uses. The maximum length is 32768 characters.
     instructions: Optional[str] = None
 
-    # -*- OpenAiAssistant Tools
+    # -*- OpenAIAssistant Tools
     # A list of tools provided to the assistant. There can be a maximum of 128 tools per assistant.
     # Tools can be of types code_interpreter, retrieval, or function.
     tools: Optional[List[Union[Tool, ToolRegistry, Callable, Dict]]] = None
-    # -*- Functions available to the OpenAiAssistant to call
+    # -*- Functions available to the OpenAIAssistant to call
    # Functions extracted from the tools which can be executed locally by the assistant.
     functions: Optional[Dict[str, Function]] = None
 
-    # -*- OpenAiAssistant Files
+    # -*- OpenAIAssistant Files
     # A list of file IDs attached to this assistant.
     # There can be a maximum of 20 files attached to the assistant.
     # Files are ordered by their creation date in ascending order.
@@ -51,14 +51,14 @@ class OpenAiAssistant(BaseModel):
     # Files attached to this assistant.
     files: Optional[List[File]] = None
 
-    # -*- OpenAiAssistant Storage
+    # -*- OpenAIAssistant Storage
     # storage: Optional[AssistantStorage] = None
     # Create table if it doesn't exist
     # create_storage: bool = True
     # AssistantRow from the database: DO NOT SET THIS MANUALLY
     # database_row: Optional[AssistantRow] = None
 
-    # -*- OpenAiAssistant Knowledge Base
+    # -*- OpenAIAssistant Knowledge Base
     # knowledge_base: Optional[KnowledgeBase] = None
 
     # Set of 16 key-value pairs that can be attached to an object.
@@ -92,18 +92,18 @@ def client(self) -> OpenAI:
         return self.openai or OpenAI()
 
     @model_validator(mode="after")
-    def extract_functions_from_tools(self) -> "OpenAiAssistant":
+    def extract_functions_from_tools(self) -> "OpenAIAssistant":
         if self.tools is not None:
             for tool in self.tools:
                 if self.functions is None:
                     self.functions = {}
                 if isinstance(tool, ToolRegistry):
                     self.functions.update(tool.functions)
-                    logger.debug(f"Functions from {tool.name} added to OpenAiAssistant.")
+                    logger.debug(f"Functions from {tool.name} added to OpenAIAssistant.")
                 elif callable(tool):
                     f = Function.from_callable(tool)
                     self.functions[f.name] = f
-                    logger.debug(f"Function {f.name} added to OpenAiAssistant")
+                    logger.debug(f"Function {f.name} added to OpenAIAssistant")
         return self
 
     def __enter__(self):
@@ -137,7 +137,7 @@ def get_tools_for_api(self) -> Optional[List[Dict[str, Any]]]:
                 tools_for_api.append({"type": "function", "function": _f.to_dict()})
         return tools_for_api
 
-    def create(self) -> "OpenAiAssistant":
+    def create(self) -> "OpenAIAssistant":
         request_body: Dict[str, Any] = {}
         if self.name is not None:
             request_body["name"] = self.name
@@ -163,7 +163,7 @@ def create(self) -> "OpenAiAssistant":
             **request_body,
         )
         self.load_from_openai(self.openai_assistant)
-        logger.debug(f"OpenAiAssistant created: {self.id}")
+        logger.debug(f"OpenAIAssistant created: {self.id}")
         return self
 
     def get_id(self) -> Optional[str]:
@@ -172,7 +172,7 @@ def get_id(self) -> Optional[str]:
     def get_from_openai(self) -> OpenAIAssistantType:
         _assistant_id = self.get_id()
         if _assistant_id is None:
-            raise AssistantIdNotSet("OpenAiAssistant.id not set")
+            raise AssistantIdNotSet("OpenAIAssistant.id not set")
 
         self.openai_assistant = self.client.beta.assistants.retrieve(
             assistant_id=_assistant_id,
@@ -180,20 +180,20 @@ def get_from_openai(self) -> OpenAIAssistantType:
         self.load_from_openai(self.openai_assistant)
         return self.openai_assistant
 
-    def get(self, use_cache: bool = True) -> "OpenAiAssistant":
+    def get(self, use_cache: bool = True) -> "OpenAIAssistant":
         if self.openai_assistant is not None and use_cache:
             return self
 
         self.get_from_openai()
         return self
 
-    def get_or_create(self, use_cache: bool = True) -> "OpenAiAssistant":
+    def get_or_create(self, use_cache: bool = True) -> "OpenAIAssistant":
         try:
             return self.get(use_cache=use_cache)
         except AssistantIdNotSet:
             return self.create()
 
-    def update(self) -> "OpenAiAssistant":
+    def update(self) -> "OpenAIAssistant":
         try:
             assistant_to_update = self.get_from_openai()
             if assistant_to_update is not None:
@@ -227,11 +227,11 @@ def update(self) -> "OpenAiAssistant":
                     **request_body,
                 )
                 self.load_from_openai(self.openai_assistant)
-                logger.debug(f"OpenAiAssistant updated: {self.id}")
+                logger.debug(f"OpenAIAssistant updated: {self.id}")
                 return self
-            raise ValueError("OpenAiAssistant not available")
available") + raise ValueError("OpenAIAssistant not available") except AssistantIdNotSet: - logger.warning("OpenAiAssistant not available") + logger.warning("OpenAIAssistant not available") raise def delete(self) -> OpenAIAssistantDeleted: @@ -241,10 +241,10 @@ def delete(self) -> OpenAIAssistantDeleted: deletion_status = self.client.beta.assistants.delete( assistant_id=assistant_to_delete.id, ) - logger.debug(f"OpenAiAssistant deleted: {deletion_status.id}") + logger.debug(f"OpenAIAssistant deleted: {deletion_status.id}") return deletion_status except AssistantIdNotSet: - logger.warning("OpenAiAssistant not available") + logger.warning("OpenAIAssistant not available") raise def to_dict(self) -> Dict[str, Any]: @@ -275,7 +275,7 @@ def __str__(self) -> str: return json.dumps(self.to_dict(), indent=4) def __repr__(self) -> str: - return f"" + return f"" # # def run(self, thread: Optional["Thread"]) -> "Thread": diff --git a/phi/assistant/openai/row.py b/phi/assistant/openai/row.py index 65ebe08b2..9f1d7453d 100644 --- a/phi/assistant/openai/row.py +++ b/phi/assistant/openai/row.py @@ -4,9 +4,9 @@ class AssistantRow(BaseModel): - """Interface between OpenAiAssistant class and the database""" + """Interface between OpenAIAssistant class and the database""" - # OpenAiAssistant id which can be referenced in API endpoints. + # OpenAIAssistant id which can be referenced in API endpoints. id: str # The object type, which is always assistant. object: str @@ -18,13 +18,13 @@ class AssistantRow(BaseModel): instructions: Optional[str] = None # LLM data (name, model, etc.) llm: Optional[Dict[str, Any]] = None - # OpenAiAssistant Tools + # OpenAIAssistant Tools tools: Optional[List[Dict[str, Any]]] = None # Files attached to this assistant. files: Optional[List[Dict[str, Any]]] = None # Metadata attached to this assistant. metadata: Optional[Dict[str, Any]] = None - # OpenAiAssistant Memory + # OpenAIAssistant Memory memory: Optional[Dict[str, Any]] = None # True if this assistant is active is_active: Optional[bool] = None diff --git a/phi/assistant/openai/run.py b/phi/assistant/openai/run.py index f78af4513..bee9992d9 100644 --- a/phi/assistant/openai/run.py +++ b/phi/assistant/openai/run.py @@ -3,7 +3,7 @@ from pydantic import BaseModel, ConfigDict, model_validator -from phi.assistant.openai.assistant import OpenAiAssistant +from phi.assistant.openai.assistant import OpenAIAssistant from phi.assistant.openai.exceptions import ThreadIdNotSet, AssistantIdNotSet, RunIdNotSet from phi.tools import Tool, ToolRegistry from phi.tools.function import Function @@ -33,8 +33,8 @@ class Run(BaseModel): # The ID of the thread that was executed on as a part of this run. thread_id: Optional[str] = None - # OpenAiAssistant used for this run - assistant: Optional[OpenAiAssistant] = None + # OpenAIAssistant used for this run + assistant: Optional[OpenAIAssistant] = None # The ID of the assistant used for execution of this run. 
     assistant_id: Optional[str] = None
 
@@ -106,11 +106,11 @@ def extract_functions_from_tools(self) -> "Run":
                     self.functions = {}
                 if isinstance(tool, ToolRegistry):
                     self.functions.update(tool.functions)
-                    logger.debug(f"Functions from {tool.name} added to OpenAiAssistant.")
+                    logger.debug(f"Functions from {tool.name} added to OpenAIAssistant.")
                 elif callable(tool):
                     f = Function.from_callable(tool)
                     self.functions[f.name] = f
-                    logger.debug(f"Function {f.name} added to OpenAiAssistant")
+                    logger.debug(f"Function {f.name} added to OpenAIAssistant")
         return self
 
     def load_from_openai(self, openai_run: OpenAIRun):
@@ -149,7 +149,7 @@ def get_tools_for_api(self) -> Optional[List[Dict[str, Any]]]:
     def create(
         self,
         thread_id: Optional[str] = None,
-        assistant: Optional[OpenAiAssistant] = None,
+        assistant: Optional[OpenAIAssistant] = None,
         assistant_id: Optional[str] = None,
     ) -> "Run":
         _thread_id = thread_id or self.thread_id
@@ -160,7 +160,7 @@ def create(
         if _assistant_id is None:
             _assistant_id = self.assistant.get_id() if self.assistant is not None else self.assistant_id
         if _assistant_id is None:
-            raise AssistantIdNotSet("OpenAiAssistant.id not set")
+            raise AssistantIdNotSet("OpenAIAssistant.id not set")
 
         request_body: Dict[str, Any] = {}
         if self.model is not None:
@@ -209,7 +209,7 @@ def get_or_create(
         self,
         use_cache: bool = True,
         thread_id: Optional[str] = None,
-        assistant: Optional[OpenAiAssistant] = None,
+        assistant: Optional[OpenAIAssistant] = None,
         assistant_id: Optional[str] = None,
     ) -> "Run":
         try:
@@ -267,7 +267,7 @@ def wait(
     def run(
         self,
         thread_id: Optional[str] = None,
-        assistant: Optional[OpenAiAssistant] = None,
+        assistant: Optional[OpenAIAssistant] = None,
         assistant_id: Optional[str] = None,
         wait: bool = True,
         callback: Optional[Callable[[OpenAIRun], None]] = None,
@@ -287,7 +287,7 @@ def run(
         # -*- Check if run requires action
         if self.status == "requires_action":
             if self.assistant is None:
-                logger.warning("OpenAiAssistant not available to complete required_action")
+                logger.warning("OpenAIAssistant not available to complete required_action")
                 return self
             if self.required_action is not None:
                 if self.required_action.type == "submit_tool_outputs":
diff --git a/phi/assistant/openai/thread.py b/phi/assistant/openai/thread.py
index 87ed57281..f82c6f611 100644
--- a/phi/assistant/openai/thread.py
+++ b/phi/assistant/openai/thread.py
@@ -4,13 +4,13 @@
 
 from phi.assistant.openai.run import Run
 from phi.assistant.openai.message import Message
-from phi.assistant.openai.assistant import OpenAiAssistant
+from phi.assistant.openai.assistant import OpenAIAssistant
 from phi.assistant.openai.exceptions import ThreadIdNotSet
 from phi.utils.log import logger
 
 try:
     from openai import OpenAI
-    from openai.types.beta.assistant import Assistant as OpenAIAssistant
+    from openai.types.beta.assistant import Assistant as OpenAIAssistantType
     from openai.types.beta.thread import Thread as OpenAIThread
     from openai.types.beta.thread_deleted import ThreadDeleted as OpenAIThreadDeleted
 except ImportError:
@@ -25,8 +25,8 @@ class Thread(BaseModel):
     # The object type, populated by the API. Always thread.
     object: Optional[str] = None
 
-    # OpenAiAssistant used for this thread
-    assistant: Optional[OpenAiAssistant] = None
+    # OpenAIAssistant used for this thread
+    assistant: Optional[OpenAIAssistant] = None
     # The ID of the assistant for this thread.
     assistant_id: Optional[str] = None
 
@@ -42,7 +42,7 @@
     openai: Optional[OpenAI] = None
 
     openai_thread: Optional[OpenAIThread] = None
-    openai_assistant: Optional[OpenAIAssistant] = None
+    openai_assistant: Optional[OpenAIAssistantType] = None
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
@@ -162,7 +162,7 @@ def add(self, messages: List[Union[Message, Dict]]) -> None:
     def run(
         self,
         message: Optional[Union[str, Message]] = None,
-        assistant: Optional[OpenAiAssistant] = None,
+        assistant: Optional[OpenAIAssistant] = None,
         assistant_id: Optional[str] = None,
         run: Optional[Run] = None,
         wait: bool = True,
@@ -232,7 +232,7 @@ def print_messages(self) -> None:
                 table.add_column("User")
                 table.add_column(m.get_content_with_files())
             elif m.role == "assistant":
-                table.add_row("OpenAiAssistant", Markdown(m.get_content_with_files()))
+                table.add_row("OpenAIAssistant", Markdown(m.get_content_with_files()))
                 table.add_section()
             else:
                 table.add_row(m.role, Markdown(m.get_content_with_files()))
@@ -240,7 +240,7 @@
         console.print(table)
 
     def print_response(
-        self, message: str, assistant: OpenAiAssistant, current_message_only: bool = False, markdown: bool = False
+        self, message: str, assistant: OpenAIAssistant, current_message_only: bool = False, markdown: bool = False
     ) -> None:
         from rich.progress import Progress, SpinnerColumn, TextColumn
 
@@ -263,7 +263,7 @@ def print_response(
                 total_messages = len(response_messages)
                 for idx, response_message in enumerate(response_messages[::-1], start=1):
                     response_message.pprint(
-                        title=f"[bold] :robot: OpenAiAssistant ({idx}/{total_messages}) [/bold]", markdown=markdown
+                        title=f"[bold] :robot: OpenAIAssistant ({idx}/{total_messages}) [/bold]", markdown=markdown
                     )
             else:
                 for m in self.messages[::-1]:
diff --git a/phi/assistant/python.py b/phi/assistant/python.py
index 1d9834c8a..027eb00e8 100644
--- a/phi/assistant/python.py
+++ b/phi/assistant/python.py
@@ -81,12 +81,20 @@ def get_file_metadata(self) -> str:
     def get_instructions(self) -> str:
         _instructions = [
             "Determine if you can answer the question directly or if you need to run python code to accomplish the task.",
-            "If you need to run code, first **THINK** about how you will accomplish the task. No need to explain your reasoning.",
+            "If you need to run code, **THINK STEP BY STEP** and explain your reasoning.",
             "If you need access to data, check the `files` below to see if you have the data you need.",
             "If you do not have the data you need, stop and prompt the user to provide the missing information.",
-            "Once you have all the information, create python functions to accomplishes the task.",
-            "DO NOT READ THE DATA FILES DIRECTLY. Only read them in the python code you write.",
+            "Once you have all the information, create python functions to accomplish your task.",
+            'After you have all the functions, create 1 single python file that runs the functions guarded by an `if __name__ == "__main__"` block.',
         ]
+        if self.save_and_run:
+            _instructions += [
+                "After the python file is ready, save and run it using the `save_to_file_and_run` function."
+            ]
+            _instructions += ["Make sure you specify the `file_name` and `variable_to_return` parameters correctly"]
+        if self.run_code:
+            _instructions += ["After the script is ready, run it using the `run_python_code` function."]
+
         if self.charting_libraries:
             if "streamlit" in self.charting_libraries:
                 _instructions += [
@@ -103,14 +111,6 @@ def get_instructions(self) -> str:
             _instructions += [
                 f"You may use the following charting libraries: {', '.join(self.charting_libraries)}",
             ]
-        _instructions += [
-            'After you have all the functions, create a python script that runs the functions guarded by a `if __name__ == "__main__"` block.'
-        ]
-        if self.save_and_run:
-            _instructions += ["After the script is ready, save and run it using the `save_to_file_and_run` function."]
-            _instructions += ["Make sure you specify the `variable_to_return` parameter correctly"]
-        if self.run_code:
-            _instructions += ["After the script is ready, run it using the `run_python_code` function."]
 
         _instructions += ["Continue till you have accomplished the task."]
 
@@ -134,6 +134,7 @@ def get_instructions(self) -> str:
 
             - Even if you know the answer, you MUST get the answer using Python code.
             - Refuse to delete any data, or drop anything sensitive.
+            - DO NOT READ THE DATA FILES DIRECTLY. Only read them in the python code you write.
             """
         )
diff --git a/phi/conversation/conversation.py b/phi/conversation/conversation.py
index e1a94ec2b..0f106d6ca 100644
--- a/phi/conversation/conversation.py
+++ b/phi/conversation/conversation.py
@@ -1,6 +1,7 @@
 import json
 from uuid import uuid4
 from datetime import datetime
+from collections import OrderedDict
 from typing import List, Any, Optional, Dict, Iterator, Callable, Union, Type, Tuple
 
 from pydantic import BaseModel, ConfigDict, field_validator, Field, ValidationError
@@ -97,6 +98,7 @@ class Conversation(BaseModel):
 
     # -*- Conversation Assistants
     assistants: Optional[List[Assistant]] = None
+    show_assistant_responses: bool = False
 
     #
     # -*- Prompt Settings
@@ -352,13 +354,17 @@ def end(self) -> None:
             self.storage.end(conversation_id=self.id)
         self.is_active = False
 
-    def get_delegation_functions_for_task(self, task: Task) -> Optional[List[Function]]:
+    def get_delegation_functions_for_task(
+        self, task: Task, assistant_responses: Optional[Dict[str, List[str]]] = None
+    ) -> Optional[List[Function]]:
         if self.assistants is None or len(self.assistants) == 0:
             return None
 
         delegation_functions: List[Function] = []
         for assistant in self.assistants:
-            delegation_functions.append(assistant.get_delegation_function(task=task))
+            delegation_functions.append(
+                assistant.get_delegation_function(task=task, assistant_responses=assistant_responses)
+            )
         return delegation_functions
 
     def _run(self, message: Optional[Union[List[Dict], str]] = None, stream: bool = True) -> Iterator[str]:
@@ -373,11 +379,13 @@ def _run(self, message: Optional[Union[List[Dict], str]] = None, stream: bool =
         # meta_data for all tasks in this run
         conversation_tasks: List[Dict[str, Any]] = []
 
+        # Final LLM response after running all tasks
+        conversation_run_response = ""
+        assistant_responses: Dict[str, List[str]] = OrderedDict()
+
         # Messages for this run
         # TODO: remove this when frontend is updated
         run_messages: List[Message] = []
-        # Complete LLM response after running all tasks
-        llm_response = ""
 
         # -*- Generate response by running tasks
         current_task: Optional[Task] = None
@@ -415,7 +423,9 @@ def _run(self, message: Optional[Union[List[Dict], str]] = None, stream: bool =
                     current_task.llm = self.llm.model_copy()
 
                 # Add delegation functions to the task
-                delegation_functions = self.get_delegation_functions_for_task(task=current_task)
+                delegation_functions = self.get_delegation_functions_for_task(
+                    task=current_task, assistant_responses=assistant_responses
+                )
                 if delegation_functions and len(delegation_functions) > 0:
                     if current_task.tools is None:
                         current_task.tools = []
@@ -425,11 +435,11 @@ def _run(self, message: Optional[Union[List[Dict], str]] = None, stream: bool =
            if stream and current_task.streamable:
                 for chunk in current_task.run(message=current_task_message, stream=True):
                     if current_task.show_output:
-                        llm_response += chunk if isinstance(chunk, str) else ""
+                        conversation_run_response += chunk if isinstance(chunk, str) else ""
                         yield chunk if isinstance(chunk, str) else ""
                 if current_task.show_output:
                     yield "\n\n"
-                    llm_response += "\n\n"
+                    conversation_run_response += "\n\n"
             else:
                 current_task_response = current_task.run(message=current_task_message, stream=False)  # type: ignore
                 current_task_response_str = ""
@@ -449,8 +459,8 @@
                             yield current_task_response_str
                             yield "\n\n"
                         else:
-                            llm_response += current_task_response_str
-                            llm_response += "\n\n"
+                            conversation_run_response += current_task_response_str
+                            conversation_run_response += "\n\n"
                 except Exception as e:
                     logger.debug(f"Failed to convert response to json: {e}")
 
@@ -458,6 +468,18 @@
         if isinstance(current_task, LLMTask):
             run_messages.extend(current_task.memory.llm_messages)
 
+        # -*- Show assistant responses
+        if self.show_assistant_responses and len(assistant_responses) > 0:
+            assistant_responses_str = ""
+            for assistant_name, assistant_response_list in assistant_responses.items():
+                assistant_responses_str += f"{assistant_name}:\n"
+                for assistant_response in assistant_response_list:
+                    assistant_responses_str += f"\n{assistant_response}\n"
+            if stream:
+                yield assistant_responses_str
+            else:
+                conversation_run_response += assistant_responses_str
+
         # -*- Save conversation to storage
         self.write_to_storage()
@@ -468,18 +490,18 @@
         }
         event_data = {
             "user_message": message,
-            "llm_response": llm_response,
+            "llm_response": conversation_run_response,
             "info": event_info,
             "metrics": self.llm.metrics,
         }
         self._api_log_conversation_event(event_type="run", event_data=event_data)
 
         # -*- Update conversation output
-        self.output = llm_response
+        self.output = conversation_run_response
 
         # -*- Yield final response if not streaming
         if not stream:
-            yield llm_response
+            yield conversation_run_response
 
         logger.debug(f"*********** Conversation End: {self.id} ***********")
 
     def run(
diff --git a/phi/task/llm/llm_task.py b/phi/task/llm/llm_task.py
index ac732b8fe..521beae31 100644
--- a/phi/task/llm/llm_task.py
+++ b/phi/task/llm/llm_task.py
@@ -408,14 +408,14 @@
             messages += self.memory.get_last_n_messages(last_n=self.num_history_messages)
         messages += [user_prompt_message]
 
-        # -*- Generate response (includes running function calls)
-        llm_response = ""
+        # -*- Generate run response (includes running function calls)
+        task_run_response = ""
         if stream:
             for response_chunk in self.llm.parsed_response_stream(messages=messages):
-                llm_response += response_chunk
+                task_run_response += response_chunk
                 yield response_chunk
         else:
-            llm_response = self.llm.parsed_response(messages=messages)
+            task_run_response = self.llm.parsed_response(messages=messages)
 
         # -*- Update task memory
         # Add user message to the task memory - this is added to the chat_history
@@ -424,7 +424,7 @@
         # Add llm messages to the task memory - this is added to the llm_messages
         self.memory.add_llm_messages(messages=messages)
         # Add llm response to the chat history
-        llm_message = Message(role="assistant", content=llm_response)
+        llm_message = Message(role="assistant", content=task_run_response)
         self.memory.add_chat_message(message=llm_message)
         # Add references to the task memory
         if references:
@@ -447,11 +447,12 @@
             self.conversation_tasks.append(self.to_dict())
 
         # -*- Update task output
-        self.output = llm_response
+        self.output = task_run_response
+        logger.debug(f"task_run_response: {task_run_response}")
 
         # -*- Yield final response if not streaming
         if not stream:
-            yield llm_response
+            yield task_run_response
 
         logger.debug(f"*********** Task End: {self.id} ***********")
diff --git a/phi/task/task.py b/phi/task/task.py
index a37b4e2d5..746d411fb 100644
--- a/phi/task/task.py
+++ b/phi/task/task.py
@@ -4,6 +4,7 @@
 from pydantic import BaseModel, ConfigDict, field_validator
 
 from phi.memory.conversation import ConversationMemory
+from phi.utils.response_iterator import ResponseIterator
 from phi.utils.log import logger, set_log_level_to_debug
 
 
@@ -19,6 +20,8 @@ class Task(BaseModel):
     conversation_memory: Optional[ConversationMemory] = None
     conversation_message: Optional[Union[List[Dict], str]] = None
     conversation_tasks: Optional[List[Dict[str, Any]]] = None
+    conversation_responses: List[str] = []
+    conversation_response_iterator: ResponseIterator = ResponseIterator()
 
     # -*- Output Settings
     # Output model for the responses
diff --git a/phi/utils/response_iterator.py b/phi/utils/response_iterator.py
new file mode 100644
index 000000000..7d2a11187
--- /dev/null
+++ b/phi/utils/response_iterator.py
@@ -0,0 +1,17 @@
+class ResponseIterator:
+    def __init__(self):
+        self.items = []
+        self.index = 0
+
+    def add(self, item):
+        self.items.append(item)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.index >= len(self.items):
+            raise StopIteration
+        item = self.items[self.index]
+        self.index += 1
+        return item
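
Usage sketch (illustrative, not part of the patch): the ResponseIterator added in phi/utils/response_iterator.py is a stateful iterator rather than a plain list, so items appended while or after it is being consumed are picked up by the next read loop instead of being replayed from the start. Everything referenced below exists in the diff above; the strings are made up.

    from phi.utils.response_iterator import ResponseIterator

    responses = ResponseIterator()
    responses.add("first assistant response")
    responses.add("second assistant response")

    # First pass consumes both items and leaves the internal index at 2.
    for item in responses:
        print(item)

    # Items added after exhaustion are seen by a later pass,
    # because the iterator keeps its read position.
    responses.add("late-arriving response")
    for item in responses:
        print(item)  # prints only "late-arriving response"

Similarly, a minimal sketch of the new show_assistant_responses flag on Conversation, assuming the import paths phi.conversation / phi.assistant match the file layout and that Conversation.run streams strings the way _run does; the assistant definition itself is hypothetical.

    from phi.assistant import Assistant
    from phi.conversation import Conversation

    # Delegation target; its captured output is keyed by class name
    # in assistant_responses inside Conversation._run.
    researcher = Assistant(name="researcher", description="Answers research sub-tasks")

    conversation = Conversation(
        assistants=[researcher],
        show_assistant_responses=True,  # echo delegated assistant output after the tasks finish
    )
    for chunk in conversation.run("Summarize the attached dataset"):
        print(chunk, end="")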