Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Memory Interface in AgentChat #4438

Open
wants to merge 19 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -32,10 +32,12 @@
from .. import EVENT_LOGGER_NAME
from ..base import Handoff as HandoffBase
from ..base import Response
from ..memory._base_memory import Memory
from ..messages import (
AgentEvent,
ChatMessage,
HandoffMessage,
MemoryQueryEvent,
MultiModalMessage,
TextMessage,
ToolCallExecutionEvent,
Expand Down Expand Up @@ -253,9 +255,22 @@
) = "You are a helpful AI assistant. Solve tasks using your tools. Reply with TERMINATE when the task has been completed.",
reflect_on_tool_use: bool = False,
tool_call_summary_format: str = "{result}",
memory: List[Memory] | None = None,
):
super().__init__(name=name, description=description)
self._model_client = model_client
self._memory = None
if memory is not None:
if isinstance(memory, Memory):
self._memory = [memory]

Check warning on line 265 in python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py

View check run for this annotation

Codecov / codecov/patch

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py#L265

Added line #L265 was not covered by tests
elif isinstance(memory, list):
self._memory = memory
else:
raise TypeError(f"Expected Memory, List[Memory], or None, got {type(memory)}")

Check warning on line 269 in python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py

View check run for this annotation

Codecov / codecov/patch

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py#L269

Added line #L269 was not covered by tests

self._system_messages: List[
SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage
] = []
if system_message is None:
self._system_messages = []
else:
Expand Down Expand Up @@ -338,6 +353,15 @@
# Inner messages.
inner_messages: List[AgentEvent | ChatMessage] = []

# Update the model context with memory content.
if self._memory:
for memory in self._memory:
memory_query_result = await memory.transform(self._model_context)
if memory_query_result and len(memory_query_result) > 0:
memory_query_event_msg = MemoryQueryEvent(content=memory_query_result, source=self.name)
inner_messages.append(memory_query_event_msg)
yield memory_query_event_msg

# Generate an inference result based on the current model context.
llm_messages = self._system_messages + await self._model_context.get_messages()
result = await self._model_client.create(
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
"""Memory API for AgentChat: the ``Memory`` protocol, its content/config
models, and the built-in ``ListMemory`` implementation."""

from ._base_memory import Memory, MemoryContent, MemoryMimeType
from ._list_memory import ListMemory, ListMemoryConfig

# Public names exported via ``from autogen_agentchat.memory import *``.
__all__ = [
    "Memory",
    "MemoryContent",
    "MemoryMimeType",
    "ListMemory",
    "ListMemoryConfig",
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Protocol, Union, runtime_checkable

from autogen_core import CancellationToken, Image
from autogen_core.model_context import ChatCompletionContext
from pydantic import BaseModel, ConfigDict, Field


class MemoryMimeType(Enum):
    """Supported MIME types for memory content."""

    # Plain, unformatted text.
    TEXT = "text/plain"
    # Structured JSON payloads.
    JSON = "application/json"
    # Markdown-formatted text.
    MARKDOWN = "text/markdown"
    # Wildcard image pattern (any image subtype), not a concrete MIME type.
    IMAGE = "image/*"
    # Opaque binary data.
    BINARY = "application/octet-stream"


# Runtime types a MemoryContent.content value may hold: text, raw bytes,
# a dict, or an autogen_core Image.
ContentType = Union[str, bytes, Dict[str, Any], Image]


class MemoryContent(BaseModel):
    """A single item stored in, or returned from, a memory implementation."""

    # The payload itself; see ContentType for the accepted runtime types.
    content: ContentType
    # Either a MemoryMimeType member or a free-form MIME string.
    mime_type: MemoryMimeType | str
    # Implementation-specific metadata attached to the entry, if any.
    metadata: Dict[str, Any] | None = None
    # When the entry was recorded, if known.
    timestamp: datetime | None = None
    # NOTE(review): presumably identifies where the entry came from
    # (agent/tool name) — confirm against concrete implementations.
    source: str | None = None
    # Relevance score attached by a query, if any.
    score: float | None = None

    # Required because ContentType includes Image, which is not a pydantic type.
    model_config = ConfigDict(arbitrary_types_allowed=True)


class BaseMemoryConfig(BaseModel):
    """Base configuration for memory implementations."""

    # Number of entries a query should return.
    k: int = Field(default=5, description="Number of results to return")
    # Minimum relevance score for returned entries; presumably None means
    # no filtering — confirm in concrete implementations.
    score_threshold: float | None = Field(default=None, description="Minimum relevance score")

    # Lets subclasses carry arbitrary (non-pydantic) configuration values.
    model_config = ConfigDict(arbitrary_types_allowed=True)


@runtime_checkable
class Memory(Protocol):
    """Structural interface that every memory implementation must satisfy.

    Decorated with ``runtime_checkable`` so concrete objects can be tested
    with ``isinstance`` (which checks method presence only, not signatures).
    """

    @property
    def name(self) -> str | None:
        """Human-readable identifier of this memory implementation, if any."""
        ...

    @property
    def config(self) -> BaseMemoryConfig:
        """Configuration object governing this memory implementation."""
        ...

    async def transform(
        self,
        model_context: ChatCompletionContext,
    ) -> List[MemoryContent]:
        """Transform *model_context* using relevant memory content.

        Args:
            model_context: The chat completion context to transform.

        Returns:
            The memory entries (with relevance scores) that were applied.
        """
        ...

    async def query(
        self,
        query: MemoryContent,
        cancellation_token: "CancellationToken | None" = None,
        **kwargs: Any,
    ) -> List[MemoryContent]:
        """Retrieve entries relevant to *query* from the memory store.

        Args:
            query: Content item to match against stored entries.
            cancellation_token: Optional token used to cancel the operation.
            **kwargs: Extra implementation-specific options.

        Returns:
            Matching memory entries, each carrying a relevance score.
        """
        ...

    async def add(self, content: MemoryContent, cancellation_token: "CancellationToken | None" = None) -> None:
        """Store *content* as a new entry in memory.

        Args:
            content: The memory content to persist.
            cancellation_token: Optional token used to cancel the operation.
        """
        ...

    async def clear(self) -> None:
        """Remove every entry from the memory store."""
        ...

    async def cleanup(self) -> None:
        """Release any resources held by this memory implementation."""
        ...

Check warning on line 107 in python/packages/autogen-agentchat/src/autogen_agentchat/memory/_base_memory.py

View check run for this annotation

Codecov / codecov/patch

python/packages/autogen-agentchat/src/autogen_agentchat/memory/_base_memory.py#L107

Added line #L107 was not covered by tests
Loading
Loading